[igt-dev] [PATCH i-g-t 1/1] lib/intel_allocator: Move the ioctl calls to client processes
Zbigniew Kempczyński
zbigniew.kempczynski at intel.com
Thu May 27 06:58:06 UTC 2021
On Wed, May 26, 2021 at 05:43:11PM +0200, Andrzej Turko wrote:
> When running the allocator in multiprocess mode, all queries
> are processed in a designated thread in the parent process.
> However, a child process may request opening the allocator
> for a gpu using a file descriptor absent in the parent process.
> Hence, querying available gtt size must be done in the child
> instead of the parent process.
>
> This also requires slight changes to the
> creation of the random and reloc allocators.
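
If I follow the intent correctly, after this patch the open path looks
roughly like the intel_allocator_open() hunk further down (sketch only,
paraphrasing the diff):

	/* in the child, on its own fd, before the request is sent */
	uint64_t gtt_size = gem_aperture_size(fd);

	if (!gem_uses_full_ppgtt(fd))
		gtt_size /= 2;			/* settle for half the aperture */
	else
		gtt_size -= RESERVED;		/* keep the last page free for the batch */

	return intel_allocator_open_full(fd, ctx, 0, gtt_size, allocator_type,
					 ALLOC_STRATEGY_HIGH_TO_LOW);

so only plain numbers cross the process boundary and the allocator thread
in the parent never has to issue an ioctl on an fd it may not have.
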
>
> Signed-off-by: Andrzej Turko <andrzej.turko at linux.intel.com>
> Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
> ---
> lib/intel_allocator.c | 51 +++++++++++++++++++++++---------
> lib/intel_allocator_random.c | 27 +++++++----------
> lib/intel_allocator_reloc.c | 20 ++++---------
> lib/intel_allocator_simple.c | 44 ++++-----------------------
> tests/i915/api_intel_allocator.c | 47 +++++++++++++++++++++++++++++
> 5 files changed, 104 insertions(+), 85 deletions(-)
>
> diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
> index 96f839d4b..5313af174 100644
> --- a/lib/intel_allocator.c
> +++ b/lib/intel_allocator.c
> @@ -45,6 +45,12 @@ static inline const char *reqstr(enum reqtype request_type)
> #define alloc_debug(...) {}
> #endif
>
> +/*
> + * We limit allocator space to avoid hang when batch would be
> + * pinned in the last page.
> + */
> +#define RESERVED 4096
> +
> struct allocator {
> int fd;
> uint32_t ctx;
> @@ -58,12 +64,11 @@ struct handle_entry {
> struct allocator *al;
> };
>
> -struct intel_allocator *intel_allocator_reloc_create(int fd);
> -struct intel_allocator *intel_allocator_random_create(int fd);
> -struct intel_allocator *intel_allocator_simple_create(int fd);
> +struct intel_allocator *intel_allocator_reloc_create(int fd, uint64_t end);
> +struct intel_allocator *intel_allocator_random_create(int fd, uint64_t end);
> struct intel_allocator *
> -intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
> - enum allocator_strategy strategy);
> +intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
> + enum allocator_strategy strategy);
>
> /*
> * Instead of trying to find first empty handle just get new one. Assuming
> @@ -286,17 +291,14 @@ static struct intel_allocator *intel_allocator_create(int fd,
> "We cannot use NONE allocator\n");
> break;
> case INTEL_ALLOCATOR_RELOC:
> - ial = intel_allocator_reloc_create(fd);
> + ial = intel_allocator_reloc_create(fd, end);
> break;
> case INTEL_ALLOCATOR_RANDOM:
> - ial = intel_allocator_random_create(fd);
> + ial = intel_allocator_random_create(fd, end);
> break;
> case INTEL_ALLOCATOR_SIMPLE:
> - if (!start && !end)
> - ial = intel_allocator_simple_create(fd);
> - else
> - ial = intel_allocator_simple_create_full(fd, start, end,
> - allocator_strategy);
> + ial = intel_allocator_simple_create(fd, start, end,
> + allocator_strategy);
> break;
> default:
> igt_assert_f(ial, "Allocator type %d not implemented\n",
> @@ -877,6 +879,13 @@ static uint64_t __intel_allocator_open_full(int fd, uint32_t ctx,
> .open.allocator_type = allocator_type,
> .open.allocator_strategy = strategy };
> struct alloc_resp resp;
> + uint64_t gtt_size = gem_aperture_size(fd);
> +
> + igt_assert(end <= gtt_size);
> + if (!gem_uses_full_ppgtt(fd))
> + gtt_size /= 2;
> + igt_assert(end - start <= gtt_size);
> +
>
> /* Get child_tid only once at open() */
> if (child_tid == -1)
> @@ -954,13 +963,27 @@ uint64_t intel_allocator_open_vm_full(int fd, uint32_t vm,
> */
> uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type)
> {
> - return intel_allocator_open_full(fd, ctx, 0, 0, allocator_type,
> + uint64_t gtt_size = gem_aperture_size(fd);
> +
> + if (!gem_uses_full_ppgtt(fd))
> + gtt_size /= 2;
> + else
> + gtt_size -= RESERVED;
> +
> + return intel_allocator_open_full(fd, ctx, 0, gtt_size, allocator_type,
> ALLOC_STRATEGY_HIGH_TO_LOW);
> }
>
> uint64_t intel_allocator_open_vm(int fd, uint32_t vm, uint8_t allocator_type)
> {
> - return intel_allocator_open_vm_full(fd, vm, 0, 0, allocator_type,
> + uint64_t gtt_size = gem_aperture_size(fd);
> +
> + if (!gem_uses_full_ppgtt(fd))
> + gtt_size /= 2;
> + else
> + gtt_size -= RESERVED;
> +
> + return intel_allocator_open_vm_full(fd, vm, 0, gtt_size, allocator_type,
> ALLOC_STRATEGY_HIGH_TO_LOW);
> }
>
> diff --git a/lib/intel_allocator_random.c b/lib/intel_allocator_random.c
> index 3d9a78f17..901a301a2 100644
> --- a/lib/intel_allocator_random.c
> +++ b/lib/intel_allocator_random.c
> @@ -10,12 +10,12 @@
> #include "igt_rand.h"
> #include "intel_allocator.h"
>
> -struct intel_allocator *intel_allocator_random_create(int fd);
> +struct intel_allocator *intel_allocator_random_create(int fd, uint64_t end);
>
> struct intel_allocator_random {
> uint64_t bias;
> uint32_t prng;
> - uint64_t gtt_size;
> + uint64_t address_mask;
> uint64_t start;
> uint64_t end;
>
> @@ -23,12 +23,8 @@ struct intel_allocator_random {
> uint64_t allocated_objects;
> };
>
> -static uint64_t get_bias(int fd)
> -{
> - (void) fd;
> +#define BIAS (256 << 10)
>
> - return 256 << 10;
> -}
>
> static void intel_allocator_random_get_address_range(struct intel_allocator *ial,
> uint64_t *startp,
> @@ -57,8 +53,8 @@ static uint64_t intel_allocator_random_alloc(struct intel_allocator *ial,
> /* randomize the address, we try to avoid relocations */
> do {
> offset = hars_petruska_f54_1_random64(&ialr->prng);
> - offset += ialr->bias; /* Keep the low 256k clear, for negative deltas */
> - offset &= ialr->gtt_size - 1;
> + offset |= ialr->bias; /* Keep the low 256k clear, for negative deltas */
> + offset &= ialr->address_mask;
> offset &= ~(alignment - 1);
> } while (offset + size > ialr->end);
>
> @@ -150,8 +146,7 @@ static bool intel_allocator_random_is_empty(struct intel_allocator *ial)
> return !ialr->allocated_objects;
> }
>
> -#define RESERVED 4096
> -struct intel_allocator *intel_allocator_random_create(int fd)
> +struct intel_allocator *intel_allocator_random_create(int fd, uint64_t end)
> {
> struct intel_allocator *ial;
> struct intel_allocator_random *ialr;
> @@ -175,14 +170,12 @@ struct intel_allocator *intel_allocator_random_create(int fd)
> ialr = ial->priv = calloc(1, sizeof(*ialr));
> igt_assert(ial->priv);
> ialr->prng = (uint32_t) to_user_pointer(ial);
> - ialr->gtt_size = gem_aperture_size(fd);
> - igt_debug("Gtt size: %" PRId64 "\n", ialr->gtt_size);
> - if (!gem_uses_full_ppgtt(fd))
> - ialr->gtt_size /= 2;
> + igt_assert(end > 0);
> + ialr->address_mask = (1ULL << (igt_fls(end) - 1)) - 1;
>
> - ialr->bias = get_bias(fd);
> + ialr->bias = BIAS;
> ialr->start = ialr->bias;
> - ialr->end = ialr->gtt_size - RESERVED;
> + ialr->end = end;
>
> ialr->allocated_objects = 0;
>
> diff --git a/lib/intel_allocator_reloc.c b/lib/intel_allocator_reloc.c
> index e8af787b0..e856132a7 100644
> --- a/lib/intel_allocator_reloc.c
> +++ b/lib/intel_allocator_reloc.c
> @@ -10,12 +10,11 @@
> #include "igt_rand.h"
> #include "intel_allocator.h"
>
> -struct intel_allocator *intel_allocator_reloc_create(int fd);
> +struct intel_allocator *intel_allocator_reloc_create(int fd, uint64_t end);
>
> struct intel_allocator_reloc {
> uint64_t bias;
> uint32_t prng;
> - uint64_t gtt_size;
> uint64_t start;
> uint64_t end;
> uint64_t offset;
> @@ -24,12 +23,8 @@ struct intel_allocator_reloc {
> uint64_t allocated_objects;
> };
>
> -static uint64_t get_bias(int fd)
> -{
> - (void) fd;
> +#define BIAS (256 << 10)
>
> - return 256 << 10;
> -}
>
> static void intel_allocator_reloc_get_address_range(struct intel_allocator *ial,
> uint64_t *startp,
> @@ -152,8 +147,7 @@ static bool intel_allocator_reloc_is_empty(struct intel_allocator *ial)
> return !ialr->allocated_objects;
> }
>
> -#define RESERVED 4096
> -struct intel_allocator *intel_allocator_reloc_create(int fd)
> +struct intel_allocator *intel_allocator_reloc_create(int fd, uint64_t end)
We don't need fd either; we can just drop it from the allocator implementations
(it must be set in intel_allocator.c).
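I.e. something along these lines (just a sketch of the signatures I have in
mind, assuming ial->fd keeps being filled in on the intel_allocator.c side):

	struct intel_allocator *intel_allocator_reloc_create(uint64_t end);
	struct intel_allocator *intel_allocator_random_create(uint64_t end);

so the implementations never see the fd at all.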
> {
> struct intel_allocator *ial;
> struct intel_allocator_reloc *ialr;
> @@ -177,14 +171,10 @@ struct intel_allocator *intel_allocator_reloc_create(int fd)
> ialr = ial->priv = calloc(1, sizeof(*ialr));
> igt_assert(ial->priv);
> ialr->prng = (uint32_t) to_user_pointer(ial);
> - ialr->gtt_size = gem_aperture_size(fd);
> - igt_debug("Gtt size: %" PRId64 "\n", ialr->gtt_size);
> - if (!gem_uses_full_ppgtt(fd))
> - ialr->gtt_size /= 2;
>
> - ialr->bias = ialr->offset = get_bias(fd);
> + ialr->bias = ialr->offset = BIAS;
> ialr->start = ialr->bias;
> - ialr->end = ialr->gtt_size - RESERVED;
> + ialr->end = end;
>
> ialr->allocated_objects = 0;
>
> diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
> index 963d8d257..6022e832b 100644
> --- a/lib/intel_allocator_simple.c
> +++ b/lib/intel_allocator_simple.c
> @@ -11,17 +11,11 @@
> #include "intel_bufops.h"
> #include "igt_map.h"
>
> -/*
> - * We limit allocator space to avoid hang when batch would be
> - * pinned in the last page.
> - */
> -#define RESERVED 4096
>
> /* Avoid compilation warning */
> -struct intel_allocator *intel_allocator_simple_create(int fd);
> struct intel_allocator *
> -intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
> - enum allocator_strategy strategy);
> +intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
> + enum allocator_strategy strategy);
>
> struct simple_vma_heap {
> struct igt_list_head holes;
> @@ -734,9 +728,9 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
> ials->allocated_objects, ials->reserved_areas);
> }
>
> -static struct intel_allocator *
> -__intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
> - enum allocator_strategy strategy)
> +struct intel_allocator *
> +intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
> + enum allocator_strategy strategy)
> {
> struct intel_allocator *ial;
> struct intel_allocator_simple *ials;
> @@ -777,31 +771,3 @@ __intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
>
> return ial;
> }
> -
> -struct intel_allocator *
> -intel_allocator_simple_create(int fd)
> -{
> - uint64_t gtt_size = gem_aperture_size(fd);
> -
> - if (!gem_uses_full_ppgtt(fd))
> - gtt_size /= 2;
> - else
> - gtt_size -= RESERVED;
> -
> - return __intel_allocator_simple_create(fd, 0, gtt_size,
> - ALLOC_STRATEGY_HIGH_TO_LOW);
> -}
> -
> -struct intel_allocator *
> -intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
> - enum allocator_strategy strategy)
> -{
> - uint64_t gtt_size = gem_aperture_size(fd);
> -
> - igt_assert(end <= gtt_size);
> - if (!gem_uses_full_ppgtt(fd))
> - gtt_size /= 2;
> - igt_assert(end - start <= gtt_size);
> -
> - return __intel_allocator_simple_create(fd, start, end, strategy);
> -}
> diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
> index 182d9ba79..b4f7799e7 100644
> --- a/tests/i915/api_intel_allocator.c
> +++ b/tests/i915/api_intel_allocator.c
> @@ -620,6 +620,50 @@ static void execbuf_with_allocator(int fd)
> igt_assert(intel_allocator_close(ahnd) == true);
> }
>
> +static void multiprocess(int fd, uint8_t type) {
Reopen would be a better name than multiprocess, imo.
> + uint64_t p_ahnd, sh_ahnd, fd_ahnd, ctx_ahnd;
> + uint64_t sh_left, sh_right, fd_left, fd_right;
> + uint64_t offset;
> +
> + intel_allocator_multiprocess_start();
> +
> + p_ahnd = intel_allocator_open(fd, 0, type);
> + offset = intel_allocator_alloc(p_ahnd, 1, 123, 0);
> + if (type == INTEL_ALLOCATOR_SIMPLE)
> + igt_assert(intel_allocator_is_allocated(p_ahnd, 1, 123, offset));
> +
> + igt_fork(child, 1) {
> +
Extra blank line.
> + sh_ahnd = intel_allocator_open(fd, 0, type);
> + if (type == INTEL_ALLOCATOR_SIMPLE)
> + igt_assert(intel_allocator_is_allocated(sh_ahnd, 1, 123, offset));
> +
> + ctx_ahnd = intel_allocator_open(fd, 1, type);
> + igt_assert(!intel_allocator_is_allocated(ctx_ahnd, 1, 123, offset));
> + intel_allocator_alloc(ctx_ahnd, 2, 123, 0);
> +
> + fd = gem_reopen_driver(fd);
> + fd_ahnd = intel_allocator_open(fd, 0, type);
> + igt_assert(!intel_allocator_is_allocated(fd_ahnd, 1, 123, offset));
> + intel_allocator_alloc(fd_ahnd, 2, 123, 0);
> +
> +
Same.
--
Zbigniew
> + intel_allocator_get_address_range(sh_ahnd, &sh_left, &sh_right);
> + intel_allocator_get_address_range(fd_ahnd, &fd_left, &fd_right);
> + igt_assert(sh_left == fd_left && sh_right == fd_right);
> +
> + intel_allocator_close(sh_ahnd);
> + intel_allocator_close(ctx_ahnd);
> + intel_allocator_close(fd_ahnd);
> +
> + }
> +
> + igt_waitchildren();
> + intel_allocator_close(p_ahnd);
> +
> + intel_allocator_multiprocess_stop();
> +}
> +
> struct allocators {
> const char *name;
> uint8_t type;
> @@ -671,6 +715,9 @@ igt_main
> igt_dynamic("reserve")
> reserve(fd, a->type);
> }
> +
> + igt_dynamic("multiprocess")
> + multiprocess(fd, a->type);
> }
> }
>
> --
> 2.25.1
>