[Intel-gfx] [PATCH 15/15] drm/i915/selftests: modify the gtt tests to also exercise huge pages
Matthew Auld
matthew.auld at intel.com
Mon Mar 6 23:54:14 UTC 2017
Teach fake_dma_object() to take an explicit GTT page size, rounding the
object size up to it, and thread a page_size parameter through the hole
exercisers (lowlevel_hole, fill_hole, walk_hole, pot_hole, drunk_hole and
shrink_hole). The ppgtt exercisers now run once for each page size
advertised in the device's page_size_mask, the mock exercisers run over
every size, and the ggtt paths stay at the 4K I915_GTT_PAGE_SIZE. The mock
ppgtt also gains the huge-page colouring hook so that placement
constraints are exercised without real hardware.

Signed-off-by: Matthew Auld <matthew.auld at intel.com>
---
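Note for review: the mock hunk below relies on i915_page_color_adjust,
which is introduced earlier in this series. Purely as a hedged sketch of
the shape of such a drm_mm colour-adjust callback (the vma's page size
acting as the node colour, with the hole trimmed so differently-coloured
neighbours never share a huge-page-aligned range) it could look roughly
like the following; the body is illustrative, not the helper from the
earlier patch:

static void i915_page_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start, u64 *end)
{
	/* Illustrative only: color carries the page size of the vma
	 * being inserted. Pad the hole so that a neighbour using a
	 * different page size never shares our huge-page boundary.
	 */
	GEM_BUG_ON(!is_power_of_2(color));

	if (node->allocated && node->color != color)
		*start = round_up(*start, color);

	node = list_next_entry(node, node_list);
	if (node->allocated && node->color != color)
		*end = round_down(*end, color);
}

With the hook wired up, the full set of hole tests can be exercised
against the mock device, e.g. by loading with i915.mock_selftests=-1.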
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 123 ++++++++++++++++++--------
drivers/gpu/drm/i915/selftests/mock_gtt.c | 3 +
2 files changed, 89 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 9b2a7228a78f..4625f93a3890 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -92,12 +92,14 @@ static const struct drm_i915_gem_object_ops fake_ops = {
};
static struct drm_i915_gem_object *
-fake_dma_object(struct drm_i915_private *i915, u64 size)
+fake_dma_object(struct drm_i915_private *i915, u64 size, unsigned long page_size)
{
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
- GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(!is_power_of_2(page_size) || page_size & ~I915_GTT_PAGE_SIZE_MASK);
+
+ size = roundup(size, page_size);
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
@@ -107,8 +109,13 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
goto err;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
+
i915_gem_object_init(obj, &fake_ops);
+ obj->page_size = page_size;
+
+ GEM_BUG_ON(!IS_ALIGNED(obj->base.size, obj->page_size));
+
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
@@ -194,13 +201,14 @@ static int igt_ppgtt_alloc(void *arg)
static int lowlevel_hole(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time)
{
I915_RND_STATE(seed_prng);
unsigned int size;
/* Keep creating larger objects until one cannot fit into the hole */
- for (size = 12; (hole_end - hole_start) >> size; size++) {
+ for (size = ilog2(page_size); (hole_end - hole_start) >> size; size++) {
I915_RND_SUBSTATE(prng, seed_prng);
struct drm_i915_gem_object *obj;
unsigned int *order, count, n;
@@ -226,7 +234,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
* memory. We expect to hit -ENOMEM.
*/
- obj = fake_dma_object(i915, BIT_ULL(size));
+ obj = fake_dma_object(i915, BIT_ULL(size), page_size);
if (IS_ERR(obj)) {
kfree(order);
break;
@@ -303,18 +311,25 @@ static void close_object_list(struct list_head *objects,
static int fill_hole(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time)
{
const u64 hole_size = hole_end - hole_start;
struct drm_i915_gem_object *obj;
- const unsigned long max_pages =
- min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
- const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
- unsigned long npages, prime, flags;
+ const unsigned int page_shift = ilog2(page_size);
+ unsigned long max_pages, max_step, npages, prime, flags;
struct i915_vma *vma;
LIST_HEAD(objects);
int err;
+ hole_start = roundup(hole_start, page_size);
+ hole_end = rounddown(hole_end, page_size);
+
+ GEM_BUG_ON(hole_start >= hole_end);
+
+ max_pages = min_t(u64, ULONG_MAX - 1, hole_size/2 >> page_shift);
+ max_step = max(int_sqrt(max_pages), 2UL);
+
/* Try binding many VMA working inwards from either edge */
flags = PIN_OFFSET_FIXED | PIN_USER;
@@ -323,7 +338,7 @@ static int fill_hole(struct drm_i915_private *i915,
for_each_prime_number_from(prime, 2, max_step) {
for (npages = 1; npages <= max_pages; npages *= prime) {
- const u64 full_size = npages << PAGE_SHIFT;
+ const u64 full_size = npages << page_shift;
const struct {
const char *name;
u64 offset;
@@ -334,7 +349,7 @@ static int fill_hole(struct drm_i915_private *i915,
{ }
}, *p;
- obj = fake_dma_object(i915, full_size);
+ obj = fake_dma_object(i915, full_size, page_size);
if (IS_ERR(obj))
break;
@@ -359,7 +374,7 @@ static int fill_hole(struct drm_i915_private *i915,
offset -= obj->base.size;
}
- err = i915_vma_pin(vma, 0, 0, offset | flags);
+ err = i915_vma_pin(vma, 0, page_size, offset | flags);
if (err) {
pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
__func__, p->name, err, npages, prime, offset);
@@ -367,7 +382,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
if (!drm_mm_node_allocated(&vma->node) ||
- i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ i915_vma_misplaced(vma, 0, page_size, offset | flags)) {
pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
__func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
offset);
@@ -397,7 +412,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
if (!drm_mm_node_allocated(&vma->node) ||
- i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ i915_vma_misplaced(vma, 0, page_size, offset | flags)) {
pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
__func__, p->name, vma->node.start, vma->node.size,
offset);
@@ -432,7 +447,7 @@ static int fill_hole(struct drm_i915_private *i915,
offset -= obj->base.size;
}
- err = i915_vma_pin(vma, 0, 0, offset | flags);
+ err = i915_vma_pin(vma, 0, page_size, offset | flags);
if (err) {
pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
__func__, p->name, err, npages, prime, offset);
@@ -440,7 +455,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
if (!drm_mm_node_allocated(&vma->node) ||
- i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ i915_vma_misplaced(vma, 0, page_size, offset | flags)) {
pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
__func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
offset);
@@ -470,7 +485,7 @@ static int fill_hole(struct drm_i915_private *i915,
}
if (!drm_mm_node_allocated(&vma->node) ||
- i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ i915_vma_misplaced(vma, 0, page_size, offset | flags)) {
pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
__func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
offset);
@@ -514,11 +529,13 @@ static int fill_hole(struct drm_i915_private *i915,
static int walk_hole(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time)
{
const u64 hole_size = hole_end - hole_start;
+ const unsigned int page_shift = ilog2(page_size);
const unsigned long max_pages =
- min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
+ min_t(u64, ULONG_MAX - 1, hole_size >> page_shift);
unsigned long flags;
u64 size;
@@ -534,7 +551,7 @@ static int walk_hole(struct drm_i915_private *i915,
u64 addr;
int err = 0;
- obj = fake_dma_object(i915, size << PAGE_SHIFT);
+ obj = fake_dma_object(i915, size << page_shift, page_size);
if (IS_ERR(obj))
break;
@@ -547,7 +564,7 @@ static int walk_hole(struct drm_i915_private *i915,
for (addr = hole_start;
addr + obj->base.size < hole_end;
addr += obj->base.size) {
- err = i915_vma_pin(vma, 0, 0, addr | flags);
+ err = i915_vma_pin(vma, 0, page_size, addr | flags);
if (err) {
pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
__func__, addr, vma->size,
@@ -557,7 +574,7 @@ static int walk_hole(struct drm_i915_private *i915,
i915_vma_unpin(vma);
if (!drm_mm_node_allocated(&vma->node) ||
- i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ i915_vma_misplaced(vma, 0, page_size, addr | flags)) {
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, vma->size);
err = -EINVAL;
@@ -595,6 +612,7 @@ static int walk_hole(struct drm_i915_private *i915,
static int pot_hole(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time)
{
struct drm_i915_gem_object *obj;
@@ -607,7 +625,7 @@ static int pot_hole(struct drm_i915_private *i915,
if (i915_is_ggtt(vm))
flags |= PIN_GLOBAL;
- obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+ obj = fake_dma_object(i915, 2 * page_size, page_size);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -619,15 +637,15 @@ static int pot_hole(struct drm_i915_private *i915,
/* Insert a pair of pages across every pot boundary within the hole */
for (pot = fls64(hole_end - 1) - 1;
- pot > ilog2(2 * I915_GTT_PAGE_SIZE);
+ pot > ilog2(2 * page_size);
pot--) {
u64 step = BIT_ULL(pot);
u64 addr;
- for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
- addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
+ for (addr = round_up(hole_start + page_size, step) - page_size;
+ addr <= round_down(hole_end - 2 * page_size, step) - page_size;
addr += step) {
- err = i915_vma_pin(vma, 0, 0, addr | flags);
+ err = i915_vma_pin(vma, 0, page_size, addr | flags);
if (err) {
pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
__func__,
@@ -671,6 +689,7 @@ static int pot_hole(struct drm_i915_private *i915,
static int drunk_hole(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time)
{
I915_RND_STATE(prng);
@@ -682,7 +701,7 @@ static int drunk_hole(struct drm_i915_private *i915,
flags |= PIN_GLOBAL;
/* Keep creating larger objects until one cannot fit into the hole */
- for (size = 12; (hole_end - hole_start) >> size; size++) {
+ for (size = ilog2(page_size); (hole_end - hole_start) >> size; size++) {
struct drm_i915_gem_object *obj;
unsigned int *order, count, n;
struct i915_vma *vma;
@@ -706,7 +725,7 @@ static int drunk_hole(struct drm_i915_private *i915,
* memory. We expect to hit -ENOMEM.
*/
- obj = fake_dma_object(i915, BIT_ULL(size));
+ obj = fake_dma_object(i915, BIT_ULL(size), page_size);
if (IS_ERR(obj)) {
kfree(order);
break;
@@ -723,6 +742,8 @@ static int drunk_hole(struct drm_i915_private *i915,
for (n = 0; n < count; n++) {
u64 addr = hole_start + order[n] * BIT_ULL(size);
+ GEM_BUG_ON(!IS_ALIGNED(addr, page_size));
+
err = i915_vma_pin(vma, 0, 0, addr | flags);
if (err) {
pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
@@ -734,7 +755,7 @@ static int drunk_hole(struct drm_i915_private *i915,
}
if (!drm_mm_node_allocated(&vma->node) ||
- i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ i915_vma_misplaced(vma, 0, page_size, addr | flags)) {
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, BIT_ULL(size));
i915_vma_unpin(vma);
@@ -771,11 +792,12 @@ static int drunk_hole(struct drm_i915_private *i915,
static int __shrink_hole(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time)
{
struct drm_i915_gem_object *obj;
unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
- unsigned int order = 12;
+ unsigned int order = ilog2(page_size);
LIST_HEAD(objects);
int err = 0;
u64 addr;
@@ -786,7 +808,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
u64 size = BIT_ULL(order++);
size = min(size, hole_end - addr);
- obj = fake_dma_object(i915, size);
+ obj = fake_dma_object(i915, size, page_size);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
break;
@@ -802,7 +824,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
GEM_BUG_ON(vma->size != size);
- err = i915_vma_pin(vma, 0, 0, addr | flags);
+ err = i915_vma_pin(vma, 0, page_size, addr | flags);
if (err) {
pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
__func__, addr, size, hole_start, hole_end, err);
@@ -810,7 +832,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
}
if (!drm_mm_node_allocated(&vma->node) ||
- i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ i915_vma_misplaced(vma, 0, page_size, addr | flags)) {
pr_err("%s incorrect at %llx + %llx\n",
__func__, addr, size);
i915_vma_unpin(vma);
@@ -837,6 +859,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
static int shrink_hole(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time)
{
unsigned long prime;
@@ -847,7 +870,8 @@ static int shrink_hole(struct drm_i915_private *i915,
for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
vm->fault_attr.interval = prime;
- err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+ err = __shrink_hole(i915, vm, hole_start, hole_end, page_size,
+ end_time);
if (err)
break;
}
@@ -861,12 +885,20 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
int (*func)(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time))
{
struct drm_file *file;
struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
- int err;
+ unsigned long page_sizes[] = {
+ I915_GTT_PAGE_SIZE,
+ I915_GTT_PAGE_SIZE_64K,
+ I915_GTT_PAGE_SIZE_2M,
+ I915_GTT_PAGE_SIZE_1G,
+ };
+ int err = 0;
+ int i;
if (!USES_FULL_PPGTT(dev_priv))
return 0;
@@ -884,7 +916,11 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
GEM_BUG_ON(offset_in_page(ppgtt->base.total));
GEM_BUG_ON(ppgtt->base.closed);
- err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ for (i = 0; !err && i < ARRAY_SIZE(page_sizes); ++i) {
+ if (INTEL_INFO(dev_priv)->page_size_mask & page_sizes[i])
+ err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total,
+ page_sizes[i], end_time);
+ }
i915_ppgtt_close(&ppgtt->base);
i915_ppgtt_put(ppgtt);
@@ -940,6 +976,7 @@ static int exercise_ggtt(struct drm_i915_private *i915,
int (*func)(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time))
{
struct i915_ggtt *ggtt = &i915->ggtt;
@@ -961,7 +998,8 @@ static int exercise_ggtt(struct drm_i915_private *i915,
if (hole_start >= hole_end)
continue;
- err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+ err = func(i915, &ggtt->base, hole_start, hole_end,
+ I915_GTT_PAGE_SIZE, end_time);
if (err)
break;
@@ -1104,12 +1142,20 @@ static int exercise_mock(struct drm_i915_private *i915,
int (*func)(struct drm_i915_private *i915,
struct i915_address_space *vm,
u64 hole_start, u64 hole_end,
+ unsigned long page_size,
unsigned long end_time))
{
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
+ unsigned long page_sizes[] = {
+ I915_GTT_PAGE_SIZE,
+ I915_GTT_PAGE_SIZE_64K,
+ I915_GTT_PAGE_SIZE_2M,
+ I915_GTT_PAGE_SIZE_1G,
+ };
int err;
+ int i;
ctx = mock_context(i915, "mock");
if (!ctx)
@@ -1118,7 +1164,10 @@ static int exercise_mock(struct drm_i915_private *i915,
ppgtt = ctx->ppgtt;
GEM_BUG_ON(!ppgtt);
- err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+ for (i = 0, err = 0; !err && i < ARRAY_SIZE(page_sizes); ++i) {
+ err = func(i915, &ppgtt->base, 0, ppgtt->base.total,
+ page_sizes[i], end_time);
+ }
mock_context_close(ctx);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a2137100d2f5..9428ea09d05d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -88,6 +88,9 @@ mock_ppgtt(struct drm_i915_private *i915,
ppgtt->base.unbind_vma = mock_unbind_ppgtt;
ppgtt->base.cleanup = mock_cleanup;
+ /* For mock testing huge-page support */
+ ppgtt->base.mm.color_adjust = i915_page_color_adjust;
+
return ppgtt;
}
--
2.9.3