[PATCH v11 3/3] drm/tests: Add a test case for drm buddy clear allocation
Arunpravin Paneer Selvam
Arunpravin.PaneerSelvam at amd.com
Sun Apr 14 21:23:49 UTC 2024
Add a new test case for drm buddy clear and dirty allocations.
v2: (Matthew)
- make size a u32
- replace PAGE_SIZE with SZ_4K
- don't fragment the address space for all the order allocation
  iterations; we can do it once and just increment and allocate
  the size.
- create a new mm with a non-power-of-two size to ensure the multi-root
  force_merge during fini.
v3:
- add randomness to the size calculation (Matthew)
Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam at amd.com>
Reviewed-by: Matthew Auld <matthew.auld at intel.com>
Suggested-by: Matthew Auld <matthew.auld at intel.com>
---
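For anyone who wants to exercise just this suite locally, something like
the following should work (a sketch: it assumes the in-tree DRM KUnit
config at drivers/gpu/drm/tests/.kunitconfig and that the suite is
registered as "drm_buddy"):

  ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests 'drm_buddy'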
drivers/gpu/drm/tests/drm_buddy_test.c | 143 +++++++++++++++++++++++++
1 file changed, 143 insertions(+)
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 4621a860cb05..e3b50e240d36 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -224,6 +224,148 @@ static void drm_test_buddy_alloc_range_bias(struct kunit *test)
 	drm_buddy_fini(&mm);
 }
+static void drm_test_buddy_alloc_clear(struct kunit *test)
+{
+	unsigned long n_pages, total, i = 0;
+	DRM_RND_STATE(prng, random_seed);
+	const unsigned long ps = SZ_4K;
+	struct drm_buddy_block *block;
+	const int max_order = 12;
+	LIST_HEAD(allocated);
+	struct drm_buddy mm;
+	unsigned int order;
+	u32 mm_size, size;
+	LIST_HEAD(dirty);
+	LIST_HEAD(clean);
+
+	mm_size = SZ_4K << max_order;
+	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+	/*
+	 * The idea is to allocate and free some random portion of the address
+	 * space, returning those pages as non-dirty, then randomly alternate
+	 * between requesting dirty and non-dirty pages (not going over the
+	 * limit we freed as non-dirty), putting each into one of two separate
+	 * lists. Loop over both lists at the end, checking that the dirty
+	 * list is indeed all dirty pages and vice versa. Free it all again,
+	 * keeping the dirty/clear status.
+	 */
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+							    5 * ps, ps, &allocated,
+							    DRM_BUDDY_TOPDOWN_ALLOCATION),
+			       "buddy_alloc hit an error size=%lu\n", 5 * ps);
+	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+
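+	/* Alternate between dirty and clear allocation requests, one page at a time. */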
+	n_pages = 10;
+	do {
+		unsigned long flags;
+		struct list_head *list;
+		int slot = i % 2;
+
+		if (slot == 0) {
+			list = &dirty;
+			flags = 0;
+		} else {
+			list = &clean;
+			flags = DRM_BUDDY_CLEAR_ALLOCATION;
+		}
+
+		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+								    ps, ps, list,
+								    flags),
+				       "buddy_alloc hit an error size=%lu\n", ps);
+	} while (++i < n_pages);
+
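+	/* The clean list must contain only cleared blocks, the dirty list none. */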
+	list_for_each_entry(block, &clean, link)
+		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);
+
+	list_for_each_entry(block, &dirty, link)
+		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+
+	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+
+	/*
+	 * Try to go over the clear limit for the allocation, i.e. request
+	 * more cleared pages than were freed as cleared above. The
+	 * allocation should never fail with a reasonable page-size.
+	 */
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+							    10 * ps, ps, &clean,
+							    DRM_BUDDY_CLEAR_ALLOCATION),
+			       "buddy_alloc hit an error size=%lu\n", 10 * ps);
+
+	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+	drm_buddy_free_list(&mm, &dirty, 0);
+	drm_buddy_fini(&mm);
+
+	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+	/*
+	 * Create a new mm. Intentionally fragment the address space by
+	 * building two alternating lists. Free both lists, one as dirty and
+	 * the other as clean. Try to allocate double the previous size with a
+	 * matching min_page_size. The allocation should never fail, since it
+	 * triggers force_merge. Also check that the block is always dirty
+	 * after force_merge. Free the block as dirty, then repeat the whole
+	 * thing, incrementing the order until we hit max_order.
+	 */
+
+	i = 0;
+	n_pages = mm_size / ps;
+	do {
+		struct list_head *list;
+		int slot = i % 2;
+
+		if (slot == 0)
+			list = &dirty;
+		else
+			list = &clean;
+
+		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+								    ps, ps, list, 0),
+				       "buddy_alloc hit an error size=%lu\n", ps);
+	} while (++i < n_pages);
+
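+	/*
+	 * Free every other page as cleared so that clear and dirty pages
+	 * alternate and nothing can merge back on free.
+	 */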
+	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
+	drm_buddy_free_list(&mm, &dirty, 0);
+
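+	/*
+	 * Each doubled allocation must force_merge a dirty page with a clear
+	 * one, so the returned block is expected to be dirty (checked for
+	 * every size below mm_size).
+	 */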
+	order = 1;
+	do {
+		size = SZ_4K << order;
+
+		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+								    size, size, &allocated,
+								    DRM_BUDDY_CLEAR_ALLOCATION),
+				       "buddy_alloc hit an error size=%u\n", size);
+		total = 0;
+		list_for_each_entry(block, &allocated, link) {
+			if (size != mm_size)
+				KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
+			total += drm_buddy_block_size(&mm, block);
+		}
+		KUNIT_EXPECT_EQ(test, total, size);
+
+		drm_buddy_free_list(&mm, &allocated, 0);
+	} while (++order <= max_order);
+
+	drm_buddy_fini(&mm);
+
+	/*
+	 * Create a new mm with a non power-of-two size. Allocate a random
+	 * size, free it as cleared and then call fini. This exercises the
+	 * multi-root force_merge during fini.
+	 */
+	mm_size = 12 * SZ_4K;
+	size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
+	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+							    size, ps, &allocated,
+							    DRM_BUDDY_TOPDOWN_ALLOCATION),
+			       "buddy_alloc hit an error size=%u\n", size);
+	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
+	drm_buddy_fini(&mm);
+}
+
 static void drm_test_buddy_alloc_contiguous(struct kunit *test)
 {
 	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
@@ -584,6 +726,7 @@ static struct kunit_case drm_buddy_tests[] = {
 	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
 	KUNIT_CASE(drm_test_buddy_alloc_pathological),
 	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+	KUNIT_CASE(drm_test_buddy_alloc_clear),
 	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
 	{}
 };
--
2.25.1