[Intel-gfx] [PATCH v2 1/1] drm/mm: add ig_frag selftest

Christian König christian.koenig at amd.com
Fri Jun 5 08:39:46 UTC 2020


On 6/5/20 10:18 AM, Nirmoy wrote:
>
> On 6/5/20 9:45 AM, Christian König wrote:
>> On 6/3/20 12:32 PM, Nirmoy Das wrote:
>>> This patch introduces fragmentation in the address range
>>> and measures the time taken by 10k and 20k insertions. igt_frag()
>>> will fail if the 20k insertions take more than 4 times as long
>>> as the 10k insertions, since insertion time should scale at most
>>> quadratically.
>>>
>>> v2:
>>> introduce fragmentation by freeing every other node.
>>> only test bottom-up and top-down for now.
>>>
>>> Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
>>> ---
>>>   drivers/gpu/drm/selftests/drm_mm_selftests.h |   1 +
>>>   drivers/gpu/drm/selftests/test-drm_mm.c      | 124 +++++++++++++++++++
>>>   2 files changed, 125 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
>>> index 6b943ea1c57d..8c87c964176b 100644
>>> --- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
>>> +++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
>>> @@ -14,6 +14,7 @@ selftest(insert, igt_insert)
>>>   selftest(replace, igt_replace)
>>>   selftest(insert_range, igt_insert_range)
>>>   selftest(align, igt_align)
>>> +selftest(frag, igt_frag)
>>>   selftest(align32, igt_align32)
>>>   selftest(align64, igt_align64)
>>>   selftest(evict, igt_evict)
>>> diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
>>> index 9aabe82dcd3a..34231baacd87 100644
>>> --- a/drivers/gpu/drm/selftests/test-drm_mm.c
>>> +++ b/drivers/gpu/drm/selftests/test-drm_mm.c
>>> @@ -10,6 +10,7 @@
>>>   #include <linux/slab.h>
>>>   #include <linux/random.h>
>>>   #include <linux/vmalloc.h>
>>> +#include <linux/ktime.h>
>>>
>>>   #include <drm/drm_mm.h>
>>>
>>> @@ -1033,6 +1034,129 @@ static int igt_insert_range(void *ignored)
>>>       return 0;
>>>   }
>>>
>>> +static int prepare_igt_frag(struct drm_mm *mm,
>>> +                struct drm_mm_node *nodes,
>>> +                unsigned int num_insert,
>>> +                const struct insert_mode *mode)
>>> +{
>>> +    unsigned int size = 4096;
>>> +    unsigned int i;
>>> +    u64 ret = -EINVAL;
>>> +
>>> +    for (i = 0; i < num_insert; i++) {
>>> +        if (!expect_insert(mm, &nodes[i], size, 0, i,
>>> +                   mode) != 0) {
>>> +            pr_err("%s insert failed\n", mode->name);
>>> +            goto out;
>>> +        }
>>> +    }
>>> +
>>> +    /* introduce fragmentation by freeing every other node */
>>> +    for (i = 0; i < num_insert; i++) {
>>> +        if (i % 2 == 0)
>>> +            drm_mm_remove_node(&nodes[i]);
>>> +    }
>>> +
>>> +out:
>>> +    return ret;
>>> +
>>> +}
>>> +
>>> +static u64 get_insert_time(struct drm_mm *mm,
>>> +               unsigned int num_insert,
>>> +               struct drm_mm_node *nodes,
>>> +               const struct insert_mode *mode)
>>> +{
>>> +    unsigned int size = 8192;
>>> +    ktime_t start;
>>> +    unsigned int i;
>>> +    u64 ret = -EINVAL;
>>> +
>>> +    start = ktime_get();
>>> +    for (i = 0; i < num_insert; i++) {
>>> +        if (!expect_insert(mm, &nodes[i], size, 0, i, mode) != 0) {
>>> +            pr_err("%s insert failed\n", mode->name);
>>> +            goto out;
>>> +        }
>>> +    }
>>> +
>>> +    ret = ktime_to_ns(ktime_sub(ktime_get(), start));
>>> +
>>> +out:
>>> +    return ret;
>>> +
>>> +}
>>> +
>>> +static int igt_frag(void *ignored)
>>> +{
>>> +    struct drm_mm mm;
>>> +    const struct insert_mode *mode;
>>> +    struct drm_mm_node *nodes, *node, *next;
>>> +    unsigned int insert_size = 10000;
>>> +    unsigned int scale_factor = 4;
>>> +    int ret = -EINVAL;
>>> +
>>> +    /* We need 4 * insert_size nodes to hold the intermediate
>>> +     * allocated drm_mm nodes:
>>> +     * 1 * insert_size for prepare_igt_frag()
>>> +     * 1 * insert_size for the first get_insert_time()
>>> +     * 2 * insert_size for the second get_insert_time()
>>> +     */
>>> +    nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
>>> +    if (!nodes)
>>> +        return -ENOMEM;
>>> +
>>> +    /* For BOTTOMUP and TOPDOWN, we first fragment the
>>> +     * address space using prepare_igt_frag() and then verify that
>>> +     * insertions scale at most quadratically from 10k to 20k insertions.
>>> +     */
>>> +    drm_mm_init(&mm, 1, U64_MAX - 2);
>>> +    for (mode = insert_modes; mode->name; mode++) {
>>> +        u64 insert_time1, insert_time2;
>>> +
>>> +        if (mode->mode != DRM_MM_INSERT_LOW ||
>>> +            mode->mode != DRM_MM_INSERT_HIGH)
>>> +            continue;
>>
>> This check here is wrong; it needs to be && instead of ||, or the
>> test wouldn't execute at all.
>
>
> I didn't bother to check dmesg after adding that "simple" check and 
> the test ran fine. :/

Yeah, after that change the test seems to work.
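
For reference, a minimal sketch of the corrected check (untested, only
exercising the bottom-up and top-down modes as the v2 changelog intends):

    /* only test bottom-up and top-down for now */
    if (mode->mode != DRM_MM_INSERT_LOW &&
        mode->mode != DRM_MM_INSERT_HIGH)
        continue;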

But there is another issue: we only cut off the right or the left branch
of the tree, and that still makes the implementation rather inefficient.

In other words, we first walk all the way down the leftmost or rightmost
path even when we already know that branch holds no suitable candidate,
and only then back off again towards the top.
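
One way out (a rough sketch only, not the current drm_mm code; the node
layout, lowest_fit() and the subtree_max_hole field are made up here for
illustration) would be to track the largest hole per subtree, so the
search can prune whole branches that cannot possibly satisfy the request:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct hole_node {
            struct rb_node rb;          /* ordered by hole start address */
            u64 hole_size;              /* size of the hole behind this node */
            u64 subtree_max_hole;       /* largest hole_size in this subtree */
    };

    /* Return the lowest-address node whose hole fits @size, or NULL. */
    static struct hole_node *lowest_fit(struct rb_node *rb, u64 size)
    {
            struct hole_node *node, *found;

            if (!rb)
                    return NULL;

            node = rb_entry(rb, struct hole_node, rb);
            if (node->subtree_max_hole < size)
                    return NULL;    /* nothing in this subtree fits, prune it */

            found = lowest_fit(rb->rb_left, size);  /* lower addresses first */
            if (found)
                    return found;
            if (node->hole_size >= size)
                    return node;
            return lowest_fit(rb->rb_right, size);
    }

Keeping subtree_max_hole up to date would be the usual augmented-rbtree
bookkeeping on insert and erase.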

Going to look into this, but your patch already improves insertion
time by a factor of nearly 30 in a fragmented address space. That is
rather nice.

Regards,
Christian.

>
> Sending again.
>
> Nirmoy
>
>
>>
>> Christian.
>>
>>> +
>>> +        ret = prepare_igt_frag(&mm, nodes, insert_size, mode);
>>> +        if (!ret)
>>> +            goto err;
>>> +
>>> +        insert_time1 = get_insert_time(&mm, insert_size,
>>> +                           nodes + insert_size, mode);
>>> +        if (insert_time1 < 0)
>>> +            goto err;
>>> +
>>> +        insert_time2 = get_insert_time(&mm, (insert_size * 2),
>>> +                           nodes + insert_size * 2, mode);
>>> +        if (insert_time2 < 0)
>>> +            goto err;
>>> +
>>> +        pr_info("%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
>>> +            mode->name, insert_size, insert_size * 2,
>>> +            insert_time1, insert_time2);
>>> +
>>> +        if (insert_time2 > (scale_factor * insert_time1)) {
>>> +            pr_err("%s fragmented insert took %llu nsecs more\n",
>>> +                   mode->name,
>>> +                   insert_time2 - (scale_factor * insert_time1));
>>> +            goto err;
>>> +        }
>>> +
>>> +        drm_mm_for_each_node_safe(node, next, &mm)
>>> +            drm_mm_remove_node(node);
>>> +    }
>>> +
>>> +    ret = 0;
>>> +err:
>>> +    drm_mm_for_each_node_safe(node, next, &mm)
>>> +        drm_mm_remove_node(node);
>>> +    drm_mm_takedown(&mm);
>>> +    vfree(nodes);
>>> +
>>> +    return ret;
>>> +}
>>> +
>>>   static int igt_align(void *ignored)
>>>   {
>>>       const struct insert_mode *mode;
>>


