[RFC v3 3/3] drm/ttm/tests: Add tests for ttm_pool
Christian König
christian.koenig at amd.com
Tue Jul 4 07:01:30 UTC 2023
On 04.07.23 08:35, Karolina Stolarek wrote:
> On 3.07.2023 13:21, Christian König wrote:
>>
>>
>> Am 03.07.23 um 09:58 schrieb Karolina Stolarek:
>>> Add KUnit tests that exercise page allocation using page pools
>>> and freeing pages, either by returning them to the pool or
>>> freeing them. Add a basic test for ttm_pool cleanup. Introduce
>>> helpers to create a dummy ttm_buffer_object.
>>>
>>> Signed-off-by: Karolina Stolarek <karolina.stolarek at intel.com>
>>> ---
>>> drivers/gpu/drm/ttm/tests/Makefile | 1 +
>>> drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c | 15 +
>>> drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h | 4 +
>>> drivers/gpu/drm/ttm/tests/ttm_pool_test.c | 405 ++++++++++++++++++
>>> 4 files changed, 425 insertions(+)
>>> create mode 100644 drivers/gpu/drm/ttm/tests/ttm_pool_test.c
>>>
>>> diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
>>> index 7917805f37af..ec87c4fc1ad5 100644
>>> --- a/drivers/gpu/drm/ttm/tests/Makefile
>>> +++ b/drivers/gpu/drm/ttm/tests/Makefile
>>> @@ -2,4 +2,5 @@
>>> obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
>>> ttm_device_test.o \
>>> + ttm_pool_test.o \
>>> ttm_kunit_helpers.o
>>> diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
>>> index 428a8a4ac9dc..9384679fccb0 100644
>>> --- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
>>> +++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
>>> @@ -26,6 +26,21 @@ int ttm_kunit_helper_init_device(struct kunit *test,
>>> }
>>> EXPORT_SYMBOL_GPL(ttm_kunit_helper_init_device);
>>> +struct ttm_buffer_object *ttm_kunit_helper_ttm_bo_init(struct kunit *test,
>>> + size_t size)
>>> +{
>>> + struct drm_gem_object gem_obj = { .size = size };
>>> + struct ttm_buffer_object *bo;
>>> +
>>> + bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
>>> + KUNIT_ASSERT_NOT_NULL(test, bo);
>>> +
>>> + bo->base = gem_obj;
>>> +
>>> + return bo;
>>> +}
>>> +EXPORT_SYMBOL_GPL(ttm_kunit_helper_ttm_bo_init);
>>> +
>>> int ttm_test_devices_init(struct kunit *test)
>>> {
>>> struct ttm_test_devices_priv *priv;
>>> diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
>>> index bf0b1000a8ee..41ad38dd34e5 100644
>>> --- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
>>> +++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
>>> @@ -7,6 +7,7 @@
>>> #include <drm/drm_drv.h>
>>> #include <drm/ttm/ttm_device.h>
>>> +#include <drm/ttm/ttm_bo.h>
>>> #include <drm/drm_kunit_helpers.h>
>>> #include <kunit/test.h>
>>> @@ -23,6 +24,9 @@ int ttm_kunit_helper_init_device(struct kunit *test,
>>> bool use_dma_alloc,
>>> bool use_dma32);
>>> +struct ttm_buffer_object *ttm_kunit_helper_ttm_bo_init(struct kunit *test,
>>> + size_t size);
>>> +
>>> int ttm_test_devices_init(struct kunit *test);
>>> void ttm_test_devices_fini(struct kunit *test);
>>> diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
>>> new file mode 100644
>>> index 000000000000..85806f72df22
>>> --- /dev/null
>>> +++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
>>> @@ -0,0 +1,405 @@
>>> +// SPDX-License-Identifier: GPL-2.0 AND MIT
>>> +/*
>>> + * Copyright © 2023 Intel Corporation
>>> + */
>>> +#include <linux/mm.h>
>>> +
>>> +#include <drm/ttm/ttm_tt.h>
>>> +#include <drm/ttm/ttm_pool.h>
>>> +
>>> +#include "ttm_kunit_helpers.h"
>>> +
>>> +struct ttm_pool_test_case {
>>> + const char *description;
>>> + unsigned int order;
>>> + bool use_dma_alloc;
>>> +};
>>> +
>>> +static struct ttm_operation_ctx simple_ctx = {
>>> + .interruptible = true,
>>> + .no_wait_gpu = false,
>>> +};
>>> +
>>> +static struct ttm_tt *mock_ttm_tt_init(struct kunit *test,
>>> + uint32_t page_flags,
>>> + enum ttm_caching caching,
>>> + size_t size)
>>> +{
>>> + struct ttm_tt *tt;
>>> + struct ttm_buffer_object *bo;
>>> + int err;
>>> +
>>> + bo = ttm_kunit_helper_ttm_bo_init(test, size);
>>> + KUNIT_ASSERT_NOT_NULL(test, bo);
>>> +
>>> + tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
>>> + KUNIT_ASSERT_NOT_NULL(test, tt);
>>> +
>>> + err = ttm_tt_init(tt, bo, page_flags, caching, 0);
>>> + KUNIT_ASSERT_EQ(test, err, 0);
>>> +
>>> + /* We don't need this BO later, release it */
>>> + ttm_bo_put(bo);
>>
>> That won't work. A BO must always outlive the TT object allocated for
>> it. Otherwise you can get crashes during TT release.
>
> OK, I can keep it around, probably as another test->priv member.
>
> I thought that for these mocks it should be fine --
> ttm_tt_init_fields() only takes the scatter list from the BO, which is
> not in use (for now, at least), and ttm_tt_fini() doesn't even touch
> that field.
Yeah, it might work for the mock test. But this is not how the interface
is supposed to be used.
The lifetime of the TT object is tied to that of the BO, e.g. when a BO
that has a TT object associated with it is destroyed, that TT object is
destroyed as well.
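Something like the following would keep the BO alive until after
ttm_tt_fini() -- just a sketch, the mock_ttm_tt_init_keep_bo() name and
the bo_out parameter are made up for illustration and are not part of
the posted series:

static struct ttm_tt *mock_ttm_tt_init_keep_bo(struct kunit *test,
					       uint32_t page_flags,
					       enum ttm_caching caching,
					       size_t size,
					       struct ttm_buffer_object **bo_out)
{
	/* Hypothetical variant of the patch's mock_ttm_tt_init() helper */
	struct ttm_buffer_object *bo;
	struct ttm_tt *tt;
	int err;

	bo = ttm_kunit_helper_ttm_bo_init(test, size);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	err = ttm_tt_init(tt, bo, page_flags, caching, 0);
	KUNIT_ASSERT_EQ(test, err, 0);

	/* Don't call ttm_bo_put() here; hand the BO back so it outlives the TT */
	*bo_out = bo;

	return tt;
}

The caller (or a test->priv member, as you suggest) then keeps the BO
around until after ttm_pool_free() and ttm_tt_fini(); since the BO comes
from kunit_kzalloc(), KUnit releases the memory once the test exits.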
Regards,
Christian.
>
> All the best,
> Karolina
>
>>
>> Regards,
>> Christian.
>>
>>> +
>>> + return tt;
>>> +}
>>> +
>>> +static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
>>> + size_t size,
>>> + enum ttm_caching caching)
>>> +{
>>> + struct ttm_test_devices_priv *priv = test->priv;
>>> + struct ttm_pool *pool;
>>> + struct ttm_tt *tt;
>>> + int err;
>>> + unsigned long order = __fls(size / PAGE_SIZE);
>>> +
>>> + tt = mock_ttm_tt_init(test, order, caching, size);
>>> + KUNIT_ASSERT_NOT_NULL(test, tt);
>>> +
>>> + pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
>>> + KUNIT_ASSERT_NOT_NULL(test, pool);
>>> +
>>> + ttm_pool_init(pool, priv->dev, NUMA_NO_NODE, true, false);
>>> +
>>> + err = ttm_pool_alloc(pool, tt, &simple_ctx);
>>> + KUNIT_ASSERT_EQ(test, err, 0);
>>> +
>>> + ttm_pool_free(pool, tt);
>>> + ttm_tt_fini(tt);
>>> +
>>> + return pool;
>>> +}
>>> +
>>> +static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
>>> + {
>>> + .description = "One page",
>>> + .order = 0,
>>> + },
>>> + {
>>> + .description = "More than one page",
>>> + .order = 2,
>>> + },
>>> + {
>>> + .description = "Above the allocation limit",
>>> + .order = MAX_ORDER + 1,
>>> + },
>>> + {
>>> + .description = "One page, with coherent DMA mappings enabled",
>>> + .order = 0,
>>> + .use_dma_alloc = true,
>>> + },
>>> + {
>>> + .description = "Above the allocation limit, with coherent
>>> DMA mappings enabled",
>>> + .order = MAX_ORDER + 1,
>>> + .use_dma_alloc = true,
>>> + },
>>> +};
>>> +
>>> +static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
>>> + char *desc)
>>> +{
>>> + strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
>>> +}
>>> +
>>> +KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
>>> + ttm_pool_alloc_case_desc);
>>> +
>>> +static void ttm_pool_alloc_basic(struct kunit *test)
>>> +{
>>> + struct ttm_test_devices_priv *priv = test->priv;
>>> + const struct ttm_pool_test_case *params = test->param_value;
>>> + struct ttm_tt *tt;
>>> + struct ttm_pool *pool;
>>> + struct page *fst_page, *last_page;
>>> + int err;
>>> + enum ttm_caching caching = ttm_uncached;
>>> + unsigned int expected_num_pages = 1 << params->order;
>>> + size_t size = expected_num_pages * PAGE_SIZE;
>>> +
>>> + tt = mock_ttm_tt_init(test, 0, caching, size);
>>> + KUNIT_ASSERT_NOT_NULL(test, tt);
>>> +
>>> + pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
>>> + KUNIT_ASSERT_NOT_NULL(test, pool);
>>> + ttm_pool_init(pool, priv->dev, NUMA_NO_NODE, params->use_dma_alloc,
>>> + false);
>>> +
>>> + KUNIT_ASSERT_PTR_EQ(test, pool->dev, priv->dev);
>>> + KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
>>> + KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
>>> +
>>> + err = ttm_pool_alloc(pool, tt, &simple_ctx);
>>> + KUNIT_ASSERT_EQ(test, err, 0);
>>> + KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
>>> +
>>> + fst_page = tt->pages[0];
>>> + last_page = tt->pages[tt->num_pages - 1];
>>> +
>>> + if (params->order <= MAX_ORDER) {
>>> + if (params->use_dma_alloc) {
>>> + KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
>>> + KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
>>> + } else {
>>> + KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
>>> + }
>>> + } else {
>>> + if (params->use_dma_alloc) {
>>> + KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
>>> + KUNIT_ASSERT_NULL(test, (void *)last_page->private);
>>> + } else {
>>> + /*
>>> + * We expect to alloc one big block, followed by
>>> + * order 0 blocks
>>> + */
>>> + KUNIT_ASSERT_EQ(test, fst_page->private,
>>> + min_t(unsigned int, MAX_ORDER,
>>> + params->order));
>>> + KUNIT_ASSERT_EQ(test, last_page->private, 0);
>>> + }
>>> + }
>>> +
>>> + ttm_pool_free(pool, tt);
>>> + ttm_tt_fini(tt);
>>> + ttm_pool_fini(pool);
>>> +}
>>> +
>>> +static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
>>> +{
>>> + struct ttm_test_devices_priv *priv = test->priv;
>>> + const struct ttm_pool_test_case *params = test->param_value;
>>> + struct ttm_tt *tt;
>>> + struct ttm_pool *pool;
>>> + struct ttm_buffer_object *bo;
>>> + dma_addr_t dma1, dma2;
>>> + int err;
>>> + enum ttm_caching caching = ttm_uncached;
>>> + unsigned int expected_num_pages = 1 << params->order;
>>> + size_t size = expected_num_pages * PAGE_SIZE;
>>> +
>>> + tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
>>> + KUNIT_ASSERT_NOT_NULL(test, tt);
>>> +
>>> + bo = ttm_kunit_helper_ttm_bo_init(test, size);
>>> + KUNIT_ASSERT_NOT_NULL(test, bo);
>>> +
>>> + err = ttm_sg_tt_init(tt, bo, 0, caching);
>>> + KUNIT_ASSERT_EQ(test, err, 0);
>>> +
>>> + pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
>>> + KUNIT_ASSERT_NOT_NULL(test, pool);
>>> +
>>> + ttm_pool_init(pool, priv->dev, NUMA_NO_NODE, true, false);
>>> +
>>> + err = ttm_pool_alloc(pool, tt, &simple_ctx);
>>> + KUNIT_ASSERT_EQ(test, err, 0);
>>> + KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
>>> +
>>> + dma1 = tt->dma_address[0];
>>> + dma2 = tt->dma_address[tt->num_pages - 1];
>>> +
>>> + KUNIT_ASSERT_NOT_NULL(test, (void *)dma1);
>>> + KUNIT_ASSERT_NOT_NULL(test, (void *)dma2);
>>> +
>>> + ttm_pool_free(pool, tt);
>>> + ttm_tt_fini(tt);
>>> + ttm_pool_fini(pool);
>>> +}
>>> +
>>> +static void ttm_pool_alloc_order_caching_match(struct kunit *test)
>>> +{
>>> + struct ttm_tt *tt;
>>> + struct ttm_pool *pool;
>>> + struct ttm_pool_type *pt;
>>> + enum ttm_caching caching = ttm_uncached;
>>> + unsigned int order = 0;
>>> + size_t size = PAGE_SIZE;
>>> + int err;
>>> +
>>> + pool = ttm_pool_pre_populated(test, size, caching);
>>> +
>>> + pt = &pool->caching[caching].orders[order];
>>> + KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
>>> +
>>> + tt = mock_ttm_tt_init(test, 0, caching, size);
>>> + KUNIT_ASSERT_NOT_NULL(test, tt);
>>> +
>>> + err = ttm_pool_alloc(pool, tt, &simple_ctx);
>>> + KUNIT_ASSERT_EQ(test, err, 0);
>>> +
>>> + KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
>>> +
>>> + ttm_pool_free(pool, tt);
>>> + ttm_tt_fini(tt);
>>> + ttm_pool_fini(pool);
>>> +}
>>> +
>>> +static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
>>> +{
>>> + struct ttm_tt *tt;
>>> + struct ttm_pool *pool;
>>> + struct ttm_pool_type *pt_pool, *pt_tt;
>>> + int err;
>>> + enum ttm_caching tt_caching = ttm_uncached;
>>> + enum ttm_caching pool_caching = ttm_cached;
>>> + size_t size = PAGE_SIZE;
>>> + unsigned int order = 0;
>>> +
>>> + pool = ttm_pool_pre_populated(test, size, pool_caching);
>>> +
>>> + pt_pool = &pool->caching[pool_caching].orders[order];
>>> + pt_tt = &pool->caching[tt_caching].orders[order];
>>> +
>>> + tt = mock_ttm_tt_init(test, 0, tt_caching, size);
>>> + KUNIT_ASSERT_NOT_NULL(test, tt);
>>> +
>>> + KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
>>> + KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
>>> +
>>> + err = ttm_pool_alloc(pool, tt, &simple_ctx);
>>> + KUNIT_ASSERT_EQ(test, err, 0);
>>> +
>>> + ttm_pool_free(pool, tt);
>>> + ttm_tt_fini(tt);
>>> +
>>> + KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
>>> + KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
>>> +
>>> + ttm_pool_fini(pool);
>>> +}
>>> +
>>> +static void ttm_pool_alloc_order_mismatch(struct kunit *test)
>>> +{
>>> + struct ttm_tt *tt;
>>> + struct ttm_pool *pool;
>>> + struct ttm_pool_type *pt_pool, *pt_tt;
>>> + int err;
>>> + enum ttm_caching caching = ttm_uncached;
>>> + unsigned int order = 2;
>>> + size_t fst_size = (1 << order) * PAGE_SIZE;
>>> + size_t snd_size = PAGE_SIZE;
>>> +
>>> + pool = ttm_pool_pre_populated(test, fst_size, caching);
>>> +
>>> + pt_pool = &pool->caching[caching].orders[order];
>>> + pt_tt = &pool->caching[caching].orders[0];
>>> +
>>> + tt = mock_ttm_tt_init(test, 0, caching, snd_size);
>>> + KUNIT_ASSERT_NOT_NULL(test, tt);
>>> +
>>> + KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
>>> + KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
>>> +
>>> + err = ttm_pool_alloc(pool, tt, &simple_ctx);
>>> + KUNIT_ASSERT_EQ(test, err, 0);
>>> +
>>> + ttm_pool_free(pool, tt);
>>> + ttm_tt_fini(tt);
>>> +
>>> + KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
>>> + KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
>>> +
>>> + ttm_pool_fini(pool);
>>> +}
>>> +
>>> +static void ttm_pool_free_dma_alloc(struct kunit *test)
>>> +{
>>> + struct ttm_test_devices_priv *priv = test->priv;
>>> + struct ttm_tt *tt;
>>> + struct ttm_pool *pool;
>>> + struct ttm_pool_type *pt;
>>> + enum ttm_caching caching = ttm_uncached;
>>> + unsigned int order = 2;
>>> + size_t size = (1 << order) * PAGE_SIZE;
>>> +
>>> + tt = mock_ttm_tt_init(test, 0, caching, size);
>>> + KUNIT_ASSERT_NOT_NULL(test, tt);
>>> +
>>> + pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
>>> + KUNIT_ASSERT_NOT_NULL(test, pool);
>>> +
>>> + ttm_pool_init(pool, priv->dev, NUMA_NO_NODE, true, false);
>>> + ttm_pool_alloc(pool, tt, &simple_ctx);
>>> +
>>> + pt = &pool->caching[caching].orders[order];
>>> + KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
>>> +
>>> + ttm_pool_free(pool, tt);
>>> + ttm_tt_fini(tt);
>>> +
>>> + KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
>>> +
>>> + ttm_pool_fini(pool);
>>> +}
>>> +
>>> +static void ttm_pool_free_no_dma_alloc(struct kunit *test)
>>> +{
>>> + struct ttm_test_devices_priv *priv = test->priv;
>>> + struct ttm_tt *tt;
>>> + struct ttm_pool *pool;
>>> + struct ttm_pool_type *pt;
>>> + enum ttm_caching caching = ttm_uncached;
>>> + unsigned int order = 2;
>>> + size_t size = (1 << order) * PAGE_SIZE;
>>> +
>>> + tt = mock_ttm_tt_init(test, 0, caching, size);
>>> + KUNIT_ASSERT_NOT_NULL(test, tt);
>>> +
>>> + pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
>>> + KUNIT_ASSERT_NOT_NULL(test, pool);
>>> +
>>> + ttm_pool_init(pool, priv->dev, NUMA_NO_NODE, false, false);
>>> + ttm_pool_alloc(pool, tt, &simple_ctx);
>>> +
>>> + pt = &pool->caching[caching].orders[order];
>>> + KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
>>> +
>>> + ttm_pool_free(pool, tt);
>>> + ttm_tt_fini(tt);
>>> +
>>> + KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
>>> +
>>> + ttm_pool_fini(pool);
>>> +}
>>> +
>>> +static void ttm_pool_fini_basic(struct kunit *test)
>>> +{
>>> + struct ttm_pool *pool;
>>> + struct ttm_pool_type *pt;
>>> + enum ttm_caching caching = ttm_uncached;
>>> + unsigned int order = 0;
>>> + size_t size = PAGE_SIZE;
>>> +
>>> + pool = ttm_pool_pre_populated(test, size, caching);
>>> + pt = &pool->caching[caching].orders[order];
>>> +
>>> + KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
>>> +
>>> + ttm_pool_fini(pool);
>>> +
>>> + KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
>>> +}
>>> +
>>> +static struct kunit_case ttm_pool_test_cases[] = {
>>> + KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
>>> + KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
>>> + ttm_pool_alloc_basic_gen_params),
>>> + KUNIT_CASE(ttm_pool_alloc_order_caching_match),
>>> + KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
>>> + KUNIT_CASE(ttm_pool_alloc_order_mismatch),
>>> + KUNIT_CASE(ttm_pool_free_dma_alloc),
>>> + KUNIT_CASE(ttm_pool_free_no_dma_alloc),
>>> + KUNIT_CASE(ttm_pool_fini_basic),
>>> + {}
>>> +};
>>> +
>>> +static struct kunit_suite ttm_pool_test_suite = {
>>> + .name = "ttm_pool",
>>> + .init = ttm_test_devices_init,
>>> + .exit = ttm_test_devices_fini,
>>> + .test_cases = ttm_pool_test_cases,
>>> +};
>>> +
>>> +kunit_test_suites(&ttm_pool_test_suite);
>>> +
>>> +MODULE_LICENSE("GPL");
>>