[Intel-gfx] [PATCH 1/3] drm/i915: Start splitting out i915_gem_object routines
Chris Wilson
chris at chris-wilson.co.uk
Mon Feb 27 13:31:43 UTC 2017
To begin with, move the obj->mm related operations to i915_gem_object.c,
in preparation for future tweaks.
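
For reference, the interface being consolidated here is the obj->mm
pin/map family. A minimal sketch of a typical caller (illustrative only,
not part of the patch; error handling trimmed to the essentials):

	void *vaddr;

	/* Pin the backing pages and map them contiguously into the kernel */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... read/write through the mapping ... */

	/* Drop the pin; once unreferenced, the mapping may be reaped */
	i915_gem_object_unpin_map(obj);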
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/Makefile | 1 +
drivers/gpu/drm/i915/i915_drv.h | 107 ------
drivers/gpu/drm/i915/i915_gem.c | 385 +------------------
drivers/gpu/drm/i915/i915_gem_object.c | 412 +++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem_object.h | 112 +++++-
.../selftests/{i915_gem_object.c => i915_gem.c} | 4 +-
.../gpu/drm/i915/selftests/i915_live_selftests.h | 2 +-
.../gpu/drm/i915/selftests/i915_mock_selftests.h | 2 +-
8 files changed, 529 insertions(+), 496 deletions(-)
create mode 100644 drivers/gpu/drm/i915/i915_gem_object.c
rename drivers/gpu/drm/i915/selftests/{i915_gem_object.c => i915_gem.c} (99%)
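
[Reviewer note, not part of the patch: the page-level lookup API moves as
well. A sketch of its intended use, assuming a pinned, shmem-backed
object with struct pages:]

	struct page *page;
	int err;

	/* Take a pin so the shrinker cannot reap the backing store */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	/* Amortised O(1) for sequential n, via the cached radixtree iter */
	page = i915_gem_object_get_page(obj, n);
	/* ... use the page ... */

	i915_gem_object_unpin_pages(obj);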
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d1d8ec49791c..1dbbcd3e23b1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_gtt.o \
i915_gem_internal.o \
i915_gem.o \
+ i915_gem_object.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3d76f9d16cc2..b88c6c58a860 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3285,113 +3285,6 @@ static inline int __sg_page_count(const struct scatterlist *sg)
return sg->length >> PAGE_SHIFT;
}
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n, unsigned int *offset);
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj,
- unsigned int n);
-
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n);
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n);
-
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-
-static inline int __must_check
-i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
- might_lock(&obj->mm.lock);
-
- if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
- return 0;
-
- return __i915_gem_object_get_pages(obj);
-}
-
-static inline void
-__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
- GEM_BUG_ON(!obj->mm.pages);
-
- atomic_inc(&obj->mm.pages_pin_count);
-}
-
-static inline bool
-i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
-{
- return atomic_read(&obj->mm.pages_pin_count);
-}
-
-static inline void
-__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(!obj->mm.pages);
-
- atomic_dec(&obj->mm.pages_pin_count);
-}
-
-static inline void
-i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
- __i915_gem_object_unpin_pages(obj);
-}
-
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
- I915_MM_NORMAL = 0,
- I915_MM_SHRINKER
-};
-
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass);
-void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
-
-enum i915_map_type {
- I915_MAP_WB = 0,
- I915_MAP_WC,
-};
-
-/**
- * i915_gem_object_pin_map - return a contiguous mapping of the entire object
- * @obj: the object to map into kernel address space
- * @type: the type of mapping, used to select pgprot_t
- *
- * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
- * pages and then returns a contiguous mapping of the backing storage into
- * the kernel address space. Based on the @type of mapping, the PTE will be
- * set to either WriteBack or WriteCombine (via pgprot_t).
- *
- * The caller is responsible for calling i915_gem_object_unpin_map() when the
- * mapping is no longer required.
- *
- * Returns the pointer through which to access the mapped object, or an
- * ERR_PTR() on error.
- */
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
- enum i915_map_type type);
-
-/**
- * i915_gem_object_unpin_map - releases an earlier mapping
- * @obj: the object to unmap
- *
- * After pinning the object and mapping its pages, once you are finished
- * with your access, call i915_gem_object_unpin_map() to release the pin
- * upon the mapping. Once the pin count reaches zero, that mapping may be
- * removed.
- */
-static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_unpin_pages(obj);
-}
-
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush);
int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aeb46af196c2..0316215221f8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2190,57 +2190,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
kfree(pages);
}
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
-{
- struct radix_tree_iter iter;
- void **slot;
-
- radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
- radix_tree_delete(&obj->mm.get_page.radix, iter.index);
-}
-
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
-{
- struct sg_table *pages;
-
- if (i915_gem_object_has_pinned_pages(obj))
- return;
-
- GEM_BUG_ON(obj->bind_count);
- if (!READ_ONCE(obj->mm.pages))
- return;
-
- /* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
- goto unlock;
-
- /* ->put_pages might need to allocate memory for the bit17 swizzle
- * array, hence protect them from being reaped by removing them from gtt
- * lists early. */
- pages = fetch_and_zero(&obj->mm.pages);
- GEM_BUG_ON(!pages);
-
- if (obj->mm.mapping) {
- void *ptr;
-
- ptr = ptr_mask_bits(obj->mm.mapping);
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
- obj->mm.mapping = NULL;
- }
-
- __i915_gem_object_reset_page_iter(obj);
-
- obj->ops->put_pages(obj, pages);
-unlock:
- mutex_unlock(&obj->mm.lock);
-}
-
static bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
@@ -2407,184 +2356,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
return ERR_PTR(ret);
}
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- lockdep_assert_held(&obj->mm.lock);
-
- obj->mm.get_page.sg_pos = pages->sgl;
- obj->mm.get_page.sg_idx = 0;
-
- obj->mm.pages = pages;
-
- if (i915_gem_object_is_tiled(obj) &&
- to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
-}
-
-static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
- struct sg_table *pages;
-
- GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
- if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
- DRM_DEBUG("Attempting to obtain a purgeable object\n");
- return -EFAULT;
- }
-
- pages = obj->ops->get_pages(obj);
- if (unlikely(IS_ERR(pages)))
- return PTR_ERR(pages);
-
- __i915_gem_object_set_pages(obj, pages);
- return 0;
-}
-
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_pin_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_unpin_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
- int err;
-
- err = mutex_lock_interruptible(&obj->mm.lock);
- if (err)
- return err;
-
- if (unlikely(!obj->mm.pages)) {
- err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto unlock;
-
- smp_mb__before_atomic();
- }
- atomic_inc(&obj->mm.pages_pin_count);
-
-unlock:
- mutex_unlock(&obj->mm.lock);
- return err;
-}
-
-/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
- enum i915_map_type type)
-{
- unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->mm.pages;
- struct sgt_iter sgt_iter;
- struct page *page;
- struct page *stack_pages[32];
- struct page **pages = stack_pages;
- unsigned long i = 0;
- pgprot_t pgprot;
- void *addr;
-
- /* A single page can always be kmapped */
- if (n_pages == 1 && type == I915_MAP_WB)
- return kmap(sg_page(sgt->sgl));
-
- if (n_pages > ARRAY_SIZE(stack_pages)) {
- /* Too big for stack -- allocate temporary array instead */
- pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
- if (!pages)
- return NULL;
- }
-
- for_each_sgt_page(page, sgt_iter, sgt)
- pages[i++] = page;
-
- /* Check that we have the expected number of pages */
- GEM_BUG_ON(i != n_pages);
-
- switch (type) {
- case I915_MAP_WB:
- pgprot = PAGE_KERNEL;
- break;
- case I915_MAP_WC:
- pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
- break;
- }
- addr = vmap(pages, n_pages, 0, pgprot);
-
- if (pages != stack_pages)
- drm_free_large(pages);
-
- return addr;
-}
-
-/* get, pin, and map the pages of the object into kernel space */
-void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
-{
- enum i915_map_type has_type;
- bool pinned;
- void *ptr;
- int ret;
-
- GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
-
- ret = mutex_lock_interruptible(&obj->mm.lock);
- if (ret)
- return ERR_PTR(ret);
-
- pinned = true;
- if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
- if (unlikely(!obj->mm.pages)) {
- ret = ____i915_gem_object_get_pages(obj);
- if (ret)
- goto err_unlock;
-
- smp_mb__before_atomic();
- }
- atomic_inc(&obj->mm.pages_pin_count);
- pinned = false;
- }
- GEM_BUG_ON(!obj->mm.pages);
-
- ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
- if (ptr && has_type != type) {
- if (pinned) {
- ret = -EBUSY;
- goto err_unpin;
- }
-
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
- ptr = obj->mm.mapping = NULL;
- }
-
- if (!ptr) {
- ptr = i915_gem_object_map(obj, type);
- if (!ptr) {
- ret = -ENOMEM;
- goto err_unpin;
- }
-
- obj->mm.mapping = ptr_pack_bits(ptr, type);
- }
-
-out_unlock:
- mutex_unlock(&obj->mm.lock);
- return ptr;
-
-err_unpin:
- atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
- ptr = ERR_PTR(ret);
- goto out_unlock;
-}
-
static bool ban_context(const struct i915_gem_context *ctx)
{
return (i915_gem_context_is_bannable(ctx) &&
@@ -5019,163 +4790,9 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
return ERR_PTR(ret);
}
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
-{
- struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
- struct scatterlist *sg;
- unsigned int idx, count;
-
- might_sleep();
- GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- /* As we iterate forward through the sg, we record each entry in a
- * radixtree for quick repeated (backwards) lookups. If we have seen
- * this index previously, we will have an entry for it.
- *
- * Initial lookup is O(N), but this is amortized to O(1) for
- * sequential page access (where each new request is consecutive
- * to the previous one). Repeated lookups are O(lg(obj->base.size)),
- * i.e. O(1) with a large constant!
- */
- if (n < READ_ONCE(iter->sg_idx))
- goto lookup;
-
- mutex_lock(&iter->lock);
-
- /* We prefer to reuse the last sg so that repeated lookup of this
- * (or the subsequent) sg are fast - comparing against the last
- * sg is faster than going through the radixtree.
- */
-
- sg = iter->sg_pos;
- idx = iter->sg_idx;
- count = __sg_page_count(sg);
-
- while (idx + count <= n) {
- unsigned long exception, i;
- int ret;
-
- /* If we cannot allocate and insert this entry, or the
- * individual pages from this range, cancel updating the
- * sg_idx so that on this lookup we are forced to linearly
- * scan onwards, but on future lookups we will try the
- * insertion again (in which case we need to be careful of
- * the error return reporting that we have already inserted
- * this index).
- */
- ret = radix_tree_insert(&iter->radix, idx, sg);
- if (ret && ret != -EEXIST)
- goto scan;
-
- exception =
- RADIX_TREE_EXCEPTIONAL_ENTRY |
- idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
- for (i = 1; i < count; i++) {
- ret = radix_tree_insert(&iter->radix, idx + i,
- (void *)exception);
- if (ret && ret != -EEXIST)
- goto scan;
- }
-
- idx += count;
- sg = ____sg_next(sg);
- count = __sg_page_count(sg);
- }
-
-scan:
- iter->sg_pos = sg;
- iter->sg_idx = idx;
-
- mutex_unlock(&iter->lock);
-
- if (unlikely(n < idx)) /* insertion completed by another thread */
- goto lookup;
-
- /* In case we failed to insert the entry into the radixtree, we need
- * to look beyond the current sg.
- */
- while (idx + count <= n) {
- idx += count;
- sg = ____sg_next(sg);
- count = __sg_page_count(sg);
- }
-
- *offset = n - idx;
- return sg;
-
-lookup:
- rcu_read_lock();
-
- sg = radix_tree_lookup(&iter->radix, n);
- GEM_BUG_ON(!sg);
-
- /* If this index is in the middle of multi-page sg entry,
- * the radixtree will contain an exceptional entry that points
- * to the start of that range. We will return the pointer to
- * the base page and the offset of this page within the
- * sg entry's range.
- */
- *offset = 0;
- if (unlikely(radix_tree_exception(sg))) {
- unsigned long base =
- (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
-
- sg = radix_tree_lookup(&iter->radix, base);
- GEM_BUG_ON(!sg);
-
- *offset = n - base;
- }
-
- rcu_read_unlock();
-
- return sg;
-}
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
-{
- struct scatterlist *sg;
- unsigned int offset;
-
- GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
-
- sg = i915_gem_object_get_sg(obj, n, &offset);
- return nth_page(sg_page(sg), offset);
-}
-
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
-{
- struct page *page;
-
- page = i915_gem_object_get_page(obj, n);
- if (!obj->mm.dirty)
- set_page_dirty(page);
-
- return page;
-}
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- struct scatterlist *sg;
- unsigned int offset;
-
- sg = i915_gem_object_get_sg(obj, n, &offset);
- return sg_dma_address(sg) + (offset << PAGE_SHIFT);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
-#include "selftests/huge_gem_object.c"
-#include "selftests/i915_gem_object.c"
+#include "selftests/i915_gem.c"
#include "selftests/i915_gem_coherency.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
new file mode 100644
index 000000000000..f222980cee34
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ lockdep_assert_held(&obj->mm.lock);
+
+ obj->mm.get_page.sg_pos = pages->sgl;
+ obj->mm.get_page.sg_idx = 0;
+
+ obj->mm.pages = pages;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
+}
+
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *pages;
+
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+ DRM_DEBUG("Attempting to obtain a purgeable object\n");
+ return -EFAULT;
+ }
+
+ pages = obj->ops->get_pages(obj);
+ if (unlikely(IS_ERR(pages)))
+ return PTR_ERR(pages);
+
+ __i915_gem_object_set_pages(obj, pages);
+ return 0;
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return err;
+
+ if (unlikely(!obj->mm.pages)) {
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
+}
+
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+{
+ struct radix_tree_iter iter;
+ void **slot;
+
+ radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+}
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ return;
+
+ GEM_BUG_ON(obj->bind_count);
+ if (!READ_ONCE(obj->mm.pages))
+ return;
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ goto unlock;
+
+ /* ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early. */
+ pages = fetch_and_zero(&obj->mm.pages);
+ GEM_BUG_ON(!pages);
+
+ if (obj->mm.mapping) {
+ void *ptr;
+
+ ptr = ptr_mask_bits(obj->mm.mapping);
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+
+ obj->mm.mapping = NULL;
+ }
+
+ __i915_gem_object_reset_page_iter(obj);
+
+ obj->ops->put_pages(obj, pages);
+unlock:
+ mutex_unlock(&obj->mm.lock);
+}
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n,
+ unsigned int *offset)
+{
+ struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+ struct scatterlist *sg;
+ unsigned int idx, count;
+
+ might_sleep();
+ GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ /* As we iterate forward through the sg, we record each entry in a
+ * radixtree for quick repeated (backwards) lookups. If we have seen
+ * this index previously, we will have an entry for it.
+ *
+ * Initial lookup is O(N), but this is amortized to O(1) for
+ * sequential page access (where each new request is consecutive
+ * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+ * i.e. O(1) with a large constant!
+ */
+ if (n < READ_ONCE(iter->sg_idx))
+ goto lookup;
+
+ mutex_lock(&iter->lock);
+
+ /* We prefer to reuse the last sg so that repeated lookup of this
+ * (or the subsequent) sg are fast - comparing against the last
+ * sg is faster than going through the radixtree.
+ */
+
+ sg = iter->sg_pos;
+ idx = iter->sg_idx;
+ count = __sg_page_count(sg);
+
+ while (idx + count <= n) {
+ unsigned long exception, i;
+ int ret;
+
+ /* If we cannot allocate and insert this entry, or the
+ * individual pages from this range, cancel updating the
+ * sg_idx so that on this lookup we are forced to linearly
+ * scan onwards, but on future lookups we will try the
+ * insertion again (in which case we need to be careful of
+ * the error return reporting that we have already inserted
+ * this index).
+ */
+ ret = radix_tree_insert(&iter->radix, idx, sg);
+ if (ret && ret != -EEXIST)
+ goto scan;
+
+ exception =
+ RADIX_TREE_EXCEPTIONAL_ENTRY |
+ idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
+ for (i = 1; i < count; i++) {
+ ret = radix_tree_insert(&iter->radix, idx + i,
+ (void *)exception);
+ if (ret && ret != -EEXIST)
+ goto scan;
+ }
+
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+scan:
+ iter->sg_pos = sg;
+ iter->sg_idx = idx;
+
+ mutex_unlock(&iter->lock);
+
+ if (unlikely(n < idx)) /* insertion completed by another thread */
+ goto lookup;
+
+ /* In case we failed to insert the entry into the radixtree, we need
+ * to look beyond the current sg.
+ */
+ while (idx + count <= n) {
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+ *offset = n - idx;
+ return sg;
+
+lookup:
+ rcu_read_lock();
+
+ sg = radix_tree_lookup(&iter->radix, n);
+ GEM_BUG_ON(!sg);
+
+ /* If this index is in the middle of multi-page sg entry,
+ * the radixtree will contain an exceptional entry that points
+ * to the start of that range. We will return the pointer to
+ * the base page and the offset of this page within the
+ * sg entry's range.
+ */
+ *offset = 0;
+ if (unlikely(radix_tree_exception(sg))) {
+ unsigned long base =
+ (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+
+ sg = radix_tree_lookup(&iter->radix, base);
+ GEM_BUG_ON(!sg);
+
+ *offset = n - base;
+ }
+
+ rcu_read_unlock();
+
+ return sg;
+}
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
+
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ struct sg_table *sgt = obj->mm.pages;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ struct page *stack_pages[32];
+ struct page **pages = stack_pages;
+ unsigned long i = 0;
+ pgprot_t pgprot;
+ void *addr;
+
+ /* A single page can always be kmapped */
+ if (n_pages == 1 && type == I915_MAP_WB)
+ return kmap(sg_page(sgt->sgl));
+
+ if (n_pages > ARRAY_SIZE(stack_pages)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+ if (!pages)
+ return NULL;
+ }
+
+ for_each_sgt_page(page, sgt_iter, sgt)
+ pages[i++] = page;
+
+ /* Check that we have the expected number of pages */
+ GEM_BUG_ON(i != n_pages);
+
+ switch (type) {
+ case I915_MAP_WB:
+ pgprot = PAGE_KERNEL;
+ break;
+ case I915_MAP_WC:
+ pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
+ break;
+ }
+ addr = vmap(pages, n_pages, 0, pgprot);
+
+ if (pages != stack_pages)
+ drm_free_large(pages);
+
+ return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ enum i915_map_type has_type;
+ bool pinned;
+ void *ptr;
+ int ret;
+
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+ ret = mutex_lock_interruptible(&obj->mm.lock);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pinned = true;
+ if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+ if (unlikely(!obj->mm.pages)) {
+ ret = ____i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+ pinned = false;
+ }
+ GEM_BUG_ON(!obj->mm.pages);
+
+ ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
+ if (ptr && has_type != type) {
+ if (pinned) {
+ ret = -EBUSY;
+ goto err_unpin;
+ }
+
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+
+ ptr = obj->mm.mapping = NULL;
+ }
+
+ if (!ptr) {
+ ptr = i915_gem_object_map(obj, type);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ obj->mm.mapping = ptr_pack_bits(ptr, type);
+ }
+
+out_unlock:
+ mutex_unlock(&obj->mm.lock);
+ return ptr;
+
+err_unpin:
+ atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+ ptr = ERR_PTR(ret);
+ goto out_unlock;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/huge_gem_object.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index c9c9a6cf8bb1..1b0bd6576785 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -33,8 +33,12 @@
#include <drm/i915_drm.h>
+#include "i915_gem_request.h"
+
#include "i915_selftest.h"
+struct drm_i915_gem_object;
+
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
@@ -364,5 +368,111 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
-#endif
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n, unsigned int *offset);
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ might_lock(&obj->mm.lock);
+
+ if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+ return 0;
+
+ return __i915_gem_object_get_pages(obj);
+}
+
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!obj->mm.pages);
+
+ atomic_inc(&obj->mm.pages_pin_count);
+}
+
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->mm.pages_pin_count);
+}
+
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(!obj->mm.pages);
+ atomic_dec(&obj->mm.pages_pin_count);
+}
+
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_unpin_pages(obj);
+}
+
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+ I915_MM_NORMAL = 0,
+ I915_MM_SHRINKER
+};
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
+
+enum i915_map_type {
+ I915_MAP_WB = 0,
+ I915_MAP_WC,
+};
+
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj: the object to map into kernel address space
+ * @type: the type of mapping, used to select pgprot_t
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space. Based on the @type of mapping, the PTE will be
+ * set to either WriteBack or WriteCombine (via pgprot_t).
+ *
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
+ *
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+ enum i915_map_type type);
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj: the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * upon the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
similarity index 99%
rename from drivers/gpu/drm/i915/selftests/i915_gem_object.c
rename to drivers/gpu/drm/i915/selftests/i915_gem.c
index 67d82bf1407f..13f05263a574 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -569,7 +569,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
-int i915_gem_object_mock_selftests(void)
+int i915_gem_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_gem_object),
@@ -588,7 +588,7 @@ int i915_gem_object_mock_selftests(void)
return err;
}
-int i915_gem_object_live_selftests(struct drm_i915_private *i915)
+int i915_gem_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_gem_huge),
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index aa680c69e5a8..c7194e6f6af9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -11,7 +11,7 @@
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(requests, i915_gem_request_live_selftests)
-selftest(objects, i915_gem_object_live_selftests)
+selftest(gem, i915_gem_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index be9a9ebf5692..3c862db41c2c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -13,7 +13,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
selftest(requests, i915_gem_request_mock_selftests)
-selftest(objects, i915_gem_object_mock_selftests)
+selftest(gem, i915_gem_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
--
2.11.0