[RFC PATCH 3/3] drm/ttm/tests: Add tests for ttm_pool

Karolina Stolarek karolina.stolarek at intel.com
Mon Jun 12 11:22:34 UTC 2023


Hi Christian,

I have a question about how ttm_pool_alloc() should handle a request
with order > MAX_ORDER. Could you take a look at my comments below?
Thanks a lot.
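
If it helps, my (possibly wrong) reading of the allocation loop in
ttm_pool_alloc() is that it silently clamps the starting order and
serves the remainder from smaller blocks, roughly:

	/* Sketch of how I read ttm_pool_alloc(), not a verbatim copy:
	 * start at the largest order that fits the request and shrink
	 * it as the remaining page count drops.
	 */
	for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		/* allocate one block of 1 << order pages, then
		 * num_pages -= 1 << order
		 */
	}

If that's right, a request with order > MAX_ORDER is split up rather
than rejected. Is that the intended contract?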

On 12.06.2023 12:55, Karolina Stolarek wrote:
> Add KUnit tests that exercise page allocation using page pools
> and freeing pages, either by returning them to the pool or
> freeing them. Add a basic test for ttm_pool cleanup. Introduce
> helpers to create a dummy ttm_buffer_object.
> 
> Signed-off-by: Karolina Stolarek <karolina.stolarek at intel.com>
> ---
>   drivers/gpu/drm/ttm/tests/Makefile            |   1 +
>   drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c |  27 ++
>   drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h |   5 +
>   drivers/gpu/drm/ttm/tests/ttm_pool_test.c     | 401 ++++++++++++++++++
>   4 files changed, 434 insertions(+)
>   create mode 100644 drivers/gpu/drm/ttm/tests/ttm_pool_test.c
> 
> diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
> index 7917805f37af..ec87c4fc1ad5 100644
> --- a/drivers/gpu/drm/ttm/tests/Makefile
> +++ b/drivers/gpu/drm/ttm/tests/Makefile
> @@ -2,4 +2,5 @@
>   
>   obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
>           ttm_device_test.o \
> +        ttm_pool_test.o \
>           ttm_kunit_helpers.o
> diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
> index d848c2372db9..f1b5df61e0bf 100644
> --- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
> +++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
> @@ -24,6 +24,33 @@ int ttm_kunit_helper_alloc_device(struct kunit *test,
>   	return err;
>   }
>   
> +struct ttm_buffer_object *ttm_kunit_helper_ttm_bo_init(struct kunit *test,
> +						       size_t size)
> +{
> +	struct ttm_test_devices_priv *priv = test->priv;
> +	struct drm_gem_object *gem_obj;
> +	struct ttm_buffer_object *bo;
> +
> +	gem_obj = kunit_kzalloc(test, sizeof(*gem_obj), GFP_KERNEL);
> +	KUNIT_ASSERT_NOT_NULL(test, gem_obj);
> +
> +	drm_gem_private_object_init(priv->drm, gem_obj, size);
> +
> +	bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
> +	KUNIT_ASSERT_NOT_NULL(test, bo);
> +
> +	bo->sg = NULL;
> +	bo->base = *gem_obj;
> +
> +	return bo;
> +}
> +
> +void ttm_kunit_helper_ttm_bo_fini(struct ttm_buffer_object *bo)
> +{
> +	drm_gem_object_release(&bo->base);
> +	ttm_bo_put(bo);
> +}
> +
>   int ttm_test_devices_init(struct kunit *test)
>   {
>   	struct ttm_test_devices_priv *priv;
> diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
> index 69fb03b9c4d2..abb8279f18c7 100644
> --- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
> +++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
> @@ -7,6 +7,7 @@
>   
>   #include <drm/drm_drv.h>
>   #include <drm/ttm/ttm_device.h>
> +#include <drm/ttm/ttm_bo.h>
>   
>   #include <drm/drm_kunit_helpers.h>
>   #include <kunit/test.h>
> @@ -23,6 +24,10 @@ int ttm_kunit_helper_alloc_device(struct kunit *test,
>   				  bool use_dma_alloc,
>   				  bool use_dma32);
>   
> +struct ttm_buffer_object *ttm_kunit_helper_ttm_bo_init(struct kunit *test,
> +						       size_t size);
> +void ttm_kunit_helper_ttm_bo_fini(struct ttm_buffer_object *bo);
> +
>   int ttm_test_devices_init(struct kunit *test);
>   void ttm_test_devices_fini(struct kunit *test);
>   
> diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
> new file mode 100644
> index 000000000000..c95d3df023f3
> --- /dev/null
> +++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
> @@ -0,0 +1,401 @@
> +// SPDX-License-Identifier: GPL-2.0 AND MIT
> +/*
> + * Copyright © 2023 Intel Corporation
> + */
> +#include <linux/mm.h>
> +
> +#include <drm/ttm/ttm_tt.h>
> +#include <drm/ttm/ttm_pool.h>
> +
> +#include "ttm_kunit_helpers.h"
> +
> +struct ttm_pool_test_case {
> +	const char *description;
> +	unsigned int order;
> +	bool use_dma_alloc;
> +};
> +
> +static struct ttm_operation_ctx simple_ctx = {
> +	.interruptible = true,
> +	.no_wait_gpu = false,
> +};
> +
> +static struct ttm_tt *mock_ttm_tt_init(struct kunit *test,
> +				       uint32_t page_flags,
> +				       enum ttm_caching caching,
> +				       size_t size)
> +{
> +	struct ttm_tt *tt;
> +	struct ttm_buffer_object *bo;
> +	int err;
> +
> +	bo = ttm_kunit_helper_ttm_bo_init(test, size);
> +	KUNIT_ASSERT_NOT_NULL(test, bo);
> +
> +	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
> +	KUNIT_ASSERT_NOT_NULL(test, tt);
> +
> +	err = ttm_tt_init(tt, bo, page_flags, caching, 0);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +
> +	/* We don't need this BO later, release it */
> +	ttm_kunit_helper_ttm_bo_fini(bo);
> +
> +	return tt;
> +}
> +
> +static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
> +					       size_t size,
> +					       enum ttm_caching caching)
> +{
> +	struct ttm_test_devices_priv *priv = test->priv;
> +	struct ttm_pool *pool;
> +	struct ttm_tt *tt;
> +	int err;
> +
> +	tt = mock_ttm_tt_init(test, 0, caching, size);
> +	KUNIT_ASSERT_NOT_NULL(test, tt);
> +
> +	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
> +	KUNIT_ASSERT_NOT_NULL(test, pool);
> +
> +	ttm_pool_init(pool, priv->dev, true, false);
> +
> +	err = ttm_pool_alloc(pool, tt, &simple_ctx);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +
> +	ttm_pool_free(pool, tt);
> +	ttm_tt_fini(tt);
> +
> +	return pool;
> +}
> +
> +static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
> +	{
> +		.description = "One page",
> +		.order = 0,
> +	},
> +	{
> +		.description = "More than one page",
> +		.order = 2,
> +	},
> +	{
> +		.description = "Above the allocation limit",
> +		.order = MAX_ORDER + 1,
> +	},
> +	{
> +		.description = "One page, with coherent DMA mappings enabled",
> +		.order = 0,
> +		.use_dma_alloc = true,
> +	},
> +	{
> +		.description = "Above the allocation limit, with coherent DMA mappings enabled",
> +		.order = MAX_ORDER + 1,
> +		.use_dma_alloc = true,
> +	},
> +};
> +
> +static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
> +				     char *desc)
> +{
> +	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
> +}
> +
> +KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
> +		  ttm_pool_alloc_case_desc);
> +
> +static void ttm_pool_alloc_basic(struct kunit *test)
> +{
> +	struct ttm_test_devices_priv *priv = test->priv;
> +	const struct ttm_pool_test_case *params = test->param_value;
> +	struct ttm_tt *tt;
> +	struct ttm_pool *pool;
> +	struct page *fst_page, *last_page;
> +	int err;
> +	enum ttm_caching caching = ttm_uncached;
> +	unsigned int expected_num_pages = 1 << params->order;
> +	size_t size = expected_num_pages * PAGE_SIZE;
> +
> +	tt = mock_ttm_tt_init(test, 0, caching, size);
> +	KUNIT_ASSERT_NOT_NULL(test, tt);
> +
> +	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
> +	KUNIT_ASSERT_NOT_NULL(test, pool);
> +
> +	ttm_pool_init(pool, priv->dev, params->use_dma_alloc, false);
> +
> +	err = ttm_pool_alloc(pool, tt, &simple_ctx);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
> +
> +	fst_page = tt->pages[0];
> +	last_page = tt->pages[tt->num_pages - 1];
> +
> +	if (params->order <= MAX_ORDER) {
> +		if (params->use_dma_alloc) {
> +			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
> +			KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
> +		} else {
> +			KUNIT_ASSERT_EQ(test, fst_page->private, params->order);

For the order = 2 case, I expected the private payload of the last
page to also be 2, but it was 0. Is my test setup wrong, or am I
misunderstanding something?
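
For reference, this is roughly how I inspected the payloads while
debugging (a throwaway snippet, not part of the patch):

	unsigned int i;

	for (i = 0; i < tt->num_pages; i++)
		kunit_info(test, "page %u private: %lu\n", i,
			   tt->pages[i]->private);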

> +		}
> +	} else {
> +		if (params->use_dma_alloc) {
> +			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
> +			KUNIT_ASSERT_NULL(test, (void *)last_page->private);

It makes sense that we don't do a DMA allocation here, but I wonder if
we should at least warn that some pages have no DMA addresses. We
asked for coherent mappings, and that condition couldn't be satisfied.
Maybe this is expected; I just wanted to double-check.
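
What I had in mind is something along these lines in the fallback path
(just a sketch; the exact condition and wording would need your input):

	/* Hypothetical: the caller asked for coherent mappings, but we
	 * had to fall back and parts of the request lack a DMA address.
	 */
	if (pool->use_dma_alloc)
		dev_warn(pool->dev,
			 "could not satisfy the whole request coherently\n");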

All the best,
Karolina

> +		} else {
> +			/*
> +			 * We expect to alloc one big block, followed by
> +			 * order 0 blocks
> +			 */
> +			KUNIT_ASSERT_EQ(test, fst_page->private,
> +					min_t(unsigned int, MAX_ORDER,
> +					      params->order));
> +			KUNIT_ASSERT_EQ(test, last_page->private, 0);
> +		}
> +	}
> +
> +	ttm_pool_free(pool, tt);
> +	ttm_tt_fini(tt);
> +	ttm_pool_fini(pool);
> +}
> +
> +static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
> +{
> +	struct ttm_test_devices_priv *priv = test->priv;
> +	const struct ttm_pool_test_case *params = test->param_value;
> +	struct ttm_tt *tt;
> +	struct ttm_pool *pool;
> +	struct ttm_buffer_object *bo;
> +	dma_addr_t dma1, dma2;
> +	int err;
> +	enum ttm_caching caching = ttm_uncached;
> +	unsigned int expected_num_pages = 1 << params->order;
> +	size_t size = expected_num_pages * PAGE_SIZE;
> +
> +	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
> +	KUNIT_ASSERT_NOT_NULL(test, tt);
> +
> +	bo = ttm_kunit_helper_ttm_bo_init(test, size);
> +	KUNIT_ASSERT_NOT_NULL(test, bo);
> +
> +	err = ttm_sg_tt_init(tt, bo, 0, caching);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +
> +	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
> +	KUNIT_ASSERT_NOT_NULL(test, pool);
> +
> +	ttm_pool_init(pool, priv->dev, true, false);
> +
> +	err = ttm_pool_alloc(pool, tt, &simple_ctx);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
> +
> +	dma1 = tt->dma_address[0];
> +	dma2 = tt->dma_address[tt->num_pages - 1];
> +
> +	KUNIT_ASSERT_NOT_NULL(test, (void *)dma1);
> +	KUNIT_ASSERT_NOT_NULL(test, (void *)dma2);
> +
> +	ttm_pool_free(pool, tt);
> +	ttm_tt_fini(tt);
> +	ttm_pool_fini(pool);
> +}
> +
> +static void ttm_pool_alloc_order_caching_match(struct kunit *test)
> +{
> +	struct ttm_tt *tt;
> +	struct ttm_pool *pool;
> +	struct ttm_pool_type *pt;
> +	enum ttm_caching caching = ttm_uncached;
> +	unsigned int order = 0;
> +	size_t size = PAGE_SIZE;
> +	int err;
> +
> +	pool = ttm_pool_pre_populated(test, size, caching);
> +
> +	pt = &pool->caching[caching].orders[order];
> +	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
> +
> +	tt = mock_ttm_tt_init(test, 0, caching, size);
> +	KUNIT_ASSERT_NOT_NULL(test, tt);
> +
> +	err = ttm_pool_alloc(pool, tt, &simple_ctx);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +
> +	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
> +
> +	ttm_pool_free(pool, tt);
> +	ttm_tt_fini(tt);
> +	ttm_pool_fini(pool);
> +}
> +
> +static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
> +{
> +	struct ttm_tt *tt;
> +	struct ttm_pool *pool;
> +	struct ttm_pool_type *pt_pool, *pt_tt;
> +	int err;
> +	enum ttm_caching tt_caching = ttm_uncached;
> +	enum ttm_caching pool_caching = ttm_cached;
> +	size_t size = PAGE_SIZE;
> +	unsigned int order = 0;
> +
> +	pool = ttm_pool_pre_populated(test, size, pool_caching);
> +
> +	pt_pool = &pool->caching[pool_caching].orders[order];
> +	pt_tt = &pool->caching[tt_caching].orders[order];
> +
> +	tt = mock_ttm_tt_init(test, 0, tt_caching, size);
> +	KUNIT_ASSERT_NOT_NULL(test, tt);
> +
> +	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
> +	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
> +
> +	err = ttm_pool_alloc(pool, tt, &simple_ctx);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +
> +	ttm_pool_free(pool, tt);
> +	ttm_tt_fini(tt);
> +
> +	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
> +	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
> +
> +	ttm_pool_fini(pool);
> +}
> +
> +static void ttm_pool_alloc_order_mismatch(struct kunit *test)
> +{
> +	struct ttm_tt *tt;
> +	struct ttm_pool *pool;
> +	struct ttm_pool_type *pt_pool, *pt_tt;
> +	int err;
> +	enum ttm_caching caching = ttm_uncached;
> +	unsigned int order = 2;
> +	size_t fst_size = (1 << order) * PAGE_SIZE;
> +	size_t snd_size = PAGE_SIZE;
> +
> +	pool = ttm_pool_pre_populated(test, fst_size, caching);
> +
> +	pt_pool = &pool->caching[caching].orders[order];
> +	pt_tt = &pool->caching[caching].orders[0];
> +
> +	tt = mock_ttm_tt_init(test, 0, caching, snd_size);
> +	KUNIT_ASSERT_NOT_NULL(test, tt);
> +
> +	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
> +	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
> +
> +	err = ttm_pool_alloc(pool, tt, &simple_ctx);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +
> +	ttm_pool_free(pool, tt);
> +	ttm_tt_fini(tt);
> +
> +	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
> +	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
> +
> +	ttm_pool_fini(pool);
> +}
> +
> +static void ttm_pool_free_dma_alloc(struct kunit *test)
> +{
> +	struct ttm_test_devices_priv *priv = test->priv;
> +	struct ttm_tt *tt;
> +	struct ttm_pool *pool;
> +	struct ttm_pool_type *pt;
> +	enum ttm_caching caching = ttm_uncached;
> +	unsigned int order = 2;
> +	size_t size = (1 << order) * PAGE_SIZE;
> +	int err;
> +
> +	tt = mock_ttm_tt_init(test, 0, caching, size);
> +	KUNIT_ASSERT_NOT_NULL(test, tt);
> +
> +	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
> +	KUNIT_ASSERT_NOT_NULL(test, pool);
> +
> +	ttm_pool_init(pool, priv->dev, true, false);
> +	err = ttm_pool_alloc(pool, tt, &simple_ctx);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +
> +	pt = &pool->caching[caching].orders[order];
> +	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
> +
> +	ttm_pool_free(pool, tt);
> +	ttm_tt_fini(tt);
> +
> +	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
> +
> +	ttm_pool_fini(pool);
> +}
> +
> +static void ttm_pool_free_no_dma_alloc(struct kunit *test)
> +{
> +	struct ttm_test_devices_priv *priv = test->priv;
> +	struct ttm_tt *tt;
> +	struct ttm_pool *pool;
> +	struct ttm_pool_type *pt;
> +	enum ttm_caching caching = ttm_uncached;
> +	unsigned int order = 2;
> +	size_t size = (1 << order) * PAGE_SIZE;
> +	int err;
> +
> +	tt = mock_ttm_tt_init(test, 0, caching, size);
> +	KUNIT_ASSERT_NOT_NULL(test, tt);
> +
> +	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
> +	KUNIT_ASSERT_NOT_NULL(test, pool);
> +
> +	ttm_pool_init(pool, priv->dev, false, false);
> +	err = ttm_pool_alloc(pool, tt, &simple_ctx);
> +	KUNIT_ASSERT_EQ(test, err, 0);
> +
> +	pt = &pool->caching[caching].orders[order];
> +	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
> +
> +	ttm_pool_free(pool, tt);
> +	ttm_tt_fini(tt);
> +
> +	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
> +
> +	ttm_pool_fini(pool);
> +}
> +
> +static void ttm_pool_fini_basic(struct kunit *test)
> +{
> +	struct ttm_pool *pool;
> +	struct ttm_pool_type *pt;
> +	enum ttm_caching caching = ttm_uncached;
> +	unsigned int order = 0;
> +	size_t size = PAGE_SIZE;
> +
> +	pool = ttm_pool_pre_populated(test, size, caching);
> +	pt = &pool->caching[caching].orders[order];
> +
> +	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
> +
> +	ttm_pool_fini(pool);
> +
> +	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
> +}
> +
> +static struct kunit_case ttm_pool_test_cases[] = {
> +	KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
> +	KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
> +			 ttm_pool_alloc_basic_gen_params),
> +	KUNIT_CASE(ttm_pool_alloc_order_caching_match),
> +	KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
> +	KUNIT_CASE(ttm_pool_alloc_order_mismatch),
> +	KUNIT_CASE(ttm_pool_free_dma_alloc),
> +	KUNIT_CASE(ttm_pool_free_no_dma_alloc),
> +	KUNIT_CASE(ttm_pool_fini_basic),
> +	{}
> +};
> +
> +static struct kunit_suite ttm_pool_test_suite = {
> +	.name = "ttm_pool",
> +	.init = ttm_test_devices_init,
> +	.exit = ttm_test_devices_fini,
> +	.test_cases = ttm_pool_test_cases,
> +};
> +
> +kunit_test_suites(&ttm_pool_test_suite);
> +
> +MODULE_LICENSE("GPL");

