[Intel-gfx] [PATCH 15/19] drm/i915/selftests: basic huge page tests
Matthew Auld
matthew.auld at intel.com
Wed Jun 21 20:33:41 UTC 2017
Add some basic sanity tests for huge page support: fill a ppgtt with
objects of every prime page count, check that misaligned dma addresses
always fall back to 4K pages, exercise the 64K page-table restrictions,
and allocate transparent-huge-pages through gemfs.

Signed-off-by: Matthew Auld <matthew.auld at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_gem.c | 1 +
drivers/gpu/drm/i915/selftests/huge_pages.c | 619 +++++++++++++++++++++
.../gpu/drm/i915/selftests/i915_live_selftests.h | 1 +
3 files changed, 621 insertions(+)
create mode 100644 drivers/gpu/drm/i915/selftests/huge_pages.c
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c4a27b4f419c..6cced283e9de 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5413,6 +5413,7 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
+#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
new file mode 100644
index 000000000000..5e0437045804
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -0,0 +1,619 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include <linux/prime_numbers.h>
+
+#include "mock_drm.h"
+
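+/* Ordered from largest to smallest, so get_largest_page_size() can simply
+ * walk the array until it finds a supported size that fits the remainder.
+ */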
+static unsigned int page_sizes[] = {
+ I915_GTT_PAGE_SIZE_1G,
+ I915_GTT_PAGE_SIZE_2M,
+ I915_GTT_PAGE_SIZE_64K,
+ I915_GTT_PAGE_SIZE_4K,
+};
+
+static unsigned int get_largest_page_size(struct drm_i915_private *i915,
+ u64 rem)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+ unsigned int page_size = page_sizes[i];
+
+ if (HAS_PAGE_SIZE(i915, page_size) && rem >= page_size)
+ return page_size;
+ }
+
+ GEM_BUG_ON(1);
+ return 0;
+}
+
+static struct sg_table *
+fake_get_huge_pages(struct drm_i915_gem_object *obj,
+ unsigned int *sg_mask)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ const unsigned long max = obj->base.size >> PAGE_SHIFT;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ typeof(obj->base.size) rem;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(st, max, GFP)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Fill the sg table with the optimal page-sized chunks, working
+ * from the largest supported page size down to the smallest.
+ */
+ rem = obj->base.size;
+ sg = st->sgl;
+ st->nents = 0;
+ do {
+ unsigned int page_size = get_largest_page_size(i915, rem);
+ unsigned int len = page_size * (rem / page_size);
+
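+ /* Use the page size itself as the fake dma address, so that each
+ * chunk appears naturally aligned to its own page size.
+ */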
+ sg->offset = 0;
+ sg->length = len;
+ sg_dma_len(sg) = len;
+ sg_dma_address(sg) = page_size;
+
+ *sg_mask |= len;
+
+ st->nents++;
+
+ rem -= len;
+ if (!rem) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = sg_next(sg);
+ } while (1);
+
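+ /* There is no real backing storage to preserve here, so let the
+ * shrinker simply discard these pages when it wants the space back.
+ */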
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ return st;
+#undef GFP
+}
+
+static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ fake_free_huge_pages(obj, pages);
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_huge_pages,
+ .put_pages = fake_put_huge_pages,
+};
+
+static struct drm_i915_gem_object *
+fake_huge_paged_object(struct drm_i915_private *i915, u64 size)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &fake_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ return obj;
+}
+
+static int igt_ppgtt_huge_fill(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_gem_object *obj;
+ unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+ unsigned long page_num;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ unsigned int expected_gtt = 0;
+ struct i915_vma *vma;
+ typeof(obj->base.size) size;
+ int i;
+
+ obj = fake_huge_paged_object(i915, page_num << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ GEM_BUG_ON(obj->base.size != page_num << PAGE_SHIFT);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ GEM_BUG_ON(!obj->mm.page_sizes.sg);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ GEM_BUG_ON(obj->mm.page_sizes.gtt);
+ GEM_BUG_ON(!vma->page_sizes.sg);
+ GEM_BUG_ON(!vma->page_sizes.phys);
+
+ size = obj->base.size;
+
+ /* Figure out the expected gtt page size knowing that we go from
+ * largest to smallest page size sg chunks, and that we align to
+ * the largest page size.
+ */
+ for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+ unsigned int page_size = page_sizes[i];
+
+ if (HAS_PAGE_SIZE(i915, page_size) && size >= page_size) {
+ expected_gtt |= page_size;
+ size &= page_size - 1;
+ }
+ }
+
+ GEM_BUG_ON(!expected_gtt);
+ GEM_BUG_ON(size);
+
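+ /* 64K and 4K pages can't be mixed within the same page-table, so a
+ * trailing 4K chunk forces the preceding 64K chunks down to 4K too.
+ */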
+ if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
+ expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
+
+ if (i915_vm_is_48bit(&ppgtt->base)) {
+ GEM_BUG_ON(vma->page_sizes.gtt != expected_gtt);
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start,
+ I915_GTT_PAGE_SIZE_2M));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.size,
+ I915_GTT_PAGE_SIZE_2M));
+ }
+ } else {
+ GEM_BUG_ON(vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K);
+ GEM_BUG_ON(vma->node.size != page_num << PAGE_SHIFT);
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+
+ if (igt_timeout(end_time,
+ "%s timed out at size %lx\n",
+ __func__, page_num << PAGE_SHIFT))
+ break;
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_ppgtt_misaligned_dma(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned long supported = INTEL_INFO(i915)->page_size_mask;
+ struct drm_i915_gem_object *obj;
+ int err;
+ int bit;
+
+ /* Sanity check dma misalignment for huge pages -- the dma addresses we
+ * insert into the paging structures need to always respect the page
+ * size alignment.
+ */
+
+ bit = ilog2(I915_GTT_PAGE_SIZE_64K);
+
+ for_each_set_bit_from(bit, &supported, BITS_PER_LONG) {
+ IGT_TIMEOUT(end_time);
+ unsigned int page_size = BIT(bit);
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ struct i915_vma *vma;
+ unsigned int offset;
+ unsigned int size =
+ round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
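+ /* Twice the 2M-rounded page size: even when misaligned, the vma
+ * still covers at least one fully aligned page-size chunk.
+ */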
+
+ obj = fake_huge_paged_object(i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ GEM_BUG_ON(obj->base.size != size);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ GEM_BUG_ON(!(obj->mm.page_sizes.sg & page_size));
+
+ /* Force the page size for this object */
+ obj->mm.page_sizes.sg = page_size;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ GEM_BUG_ON(vma->page_sizes.gtt != page_size);
+
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ if (err)
+ goto out_unpin;
+
+ /* Try all the other valid offsets until the next boundary --
+ * should always fall back to using 4K pages.
+ */
+ for (offset = 4096; offset < page_size; offset += 4096) {
+ err = i915_vma_pin(vma, 0, 0, flags | offset);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ GEM_BUG_ON(vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K);
+
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ if (err)
+ goto out_unpin;
+
+ if (igt_timeout(end_time,
+ "%s timed out at offset %x with page-size %x\n",
+ __func__, offset, page_size))
+ break;
+ }
+
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_ppgtt_64K(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_gem_object *obj;
+ struct object_info {
+ unsigned int size;
+ unsigned int gtt;
+ unsigned int offset;
+ } objects[] = {
+ /* Cases with forced padding/alignment */
+ {
+ .size = SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_64K + SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M - SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M + SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M + SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ /* Try without any forced padding/alignment */
+ {
+ .size = SZ_64K,
+ .offset = SZ_2M,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ },
+ {
+ .size = SZ_128K,
+ .offset = SZ_2M - SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ },
+ };
+ int i;
+ int err;
+
+ if (!HAS_PAGE_SIZE(i915, I915_GTT_PAGE_SIZE_64K))
+ return 0;
+
+ GEM_BUG_ON(!i915_vm_is_48bit(&ppgtt->base));
+
+ /* Sanity check some of the trickiness with 64K pages -- either we can
+ * safely mark the whole page-table (2M block) as 64K, or we have to
+ * always fall back to 4K.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(objects); ++i) {
+ unsigned int size = objects[i].size;
+ unsigned int expected_gtt = objects[i].gtt;
+ unsigned int offset = objects[i].offset;
+ struct i915_vma *vma;
+ int flags = PIN_USER;
+
+ obj = fake_huge_paged_object(i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ GEM_BUG_ON(!obj->mm.page_sizes.sg);
+
+ /* Disable 2M pages -- we only want to use 64K/4K pages for
+ * this test.
+ */
+ obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unpin;
+ }
+
+ if (offset)
+ flags |= PIN_OFFSET_FIXED | offset;
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ GEM_BUG_ON(obj->mm.page_sizes.gtt);
+ GEM_BUG_ON(!vma->page_sizes.sg);
+ GEM_BUG_ON(!vma->page_sizes.phys);
+
+ GEM_BUG_ON(vma->page_sizes.gtt != expected_gtt);
+
+ if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start,
+ I915_GTT_PAGE_SIZE_2M));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.size,
+ I915_GTT_PAGE_SIZE_2M));
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_ppgtt_gemfs_huge(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_gem_object *obj;
+ unsigned int object_sizes[] = {
+ I915_GTT_PAGE_SIZE_2M,
+ I915_GTT_PAGE_SIZE_2M + I915_GTT_PAGE_SIZE_4K,
+ };
+ int err;
+ int i;
+
+ if (!HAS_PAGE_SIZE(i915, I915_GTT_PAGE_SIZE_2M) ||
+ !IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) ||
+ !has_transparent_hugepage())
+ return 0;
+
+ /* Sanity check THP through gemfs */
+
+ for (i = 0; i < ARRAY_SIZE(object_sizes); ++i) {
+ unsigned int size = object_sizes[i];
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create(i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ GEM_BUG_ON(!obj->mm.page_sizes.sg);
+
+ if (obj->mm.page_sizes.sg < size) {
+ pr_err("Failed to allocate huge pages\n");
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ /* TODO: maybe we should try writing to the obj from the gpu
+ * pov, since we require various things to be enabled/disabled
+ * for huge-pages to work correctly. Also if we force huge-pages
+ * for gemfs we should be able to exercise 64K pages, since this
+ * should always give us a 64K aligned memory region.
+ */
+
+ GEM_BUG_ON(obj->mm.page_sizes.gtt);
+ GEM_BUG_ON(!vma->page_sizes.sg);
+ GEM_BUG_ON(!vma->page_sizes.phys);
+
+ if (i915_vm_is_48bit(&ppgtt->base)) {
+ GEM_BUG_ON(vma->page_sizes.gtt != size);
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I915_GTT_PAGE_SIZE_2M));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.size, I915_GTT_PAGE_SIZE_2M));
+ } else {
+ GEM_BUG_ON(vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K);
+ GEM_BUG_ON(vma->node.size != size);
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_ppgtt_huge_fill),
+ SUBTEST(igt_ppgtt_misaligned_dma),
+ SUBTEST(igt_ppgtt_64K),
+ SUBTEST(igt_ppgtt_gemfs_huge),
+ };
+ struct i915_hw_ppgtt *ppgtt;
+ struct drm_file *file;
+ int ret;
+
+ file = mock_file(dev_priv);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+ if (IS_ERR(ppgtt)) {
+ ret = PTR_ERR(ppgtt);
+ goto out_unlock;
+ }
+
+ ret = i915_subtests(tests, ppgtt);
+
+ i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_put(ppgtt);
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ mock_file_free(dev_priv, file);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 18b174d855ca..6b49cbb49535 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -15,5 +15,6 @@ selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(huge, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
--
2.9.4