[igt-dev] [PATCH i-g-t 2/3] lib/intel_allocator: Move the ioctl calls to client processes
Andrzej Turko
andrzej.turko at linux.intel.com
Thu May 27 19:27:49 UTC 2021
When the allocator is running in multiprocess mode, all queries
are processed by a designated thread in the parent process.
However, a child process may request opening the allocator for
a gpu using a file descriptor that is absent in the parent
process. Hence, querying the available gtt size must be done in
the child rather than in the parent process.

This modification requires slight changes to how the random and
reloc allocators are created.
Signed-off-by: Andrzej Turko <andrzej.turko at linux.intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
---
lib/intel_allocator.c | 48 ++++++++++++++++++++++++++----------
lib/intel_allocator_random.c | 26 +++++++------------
lib/intel_allocator_reloc.c | 21 ++++------------
lib/intel_allocator_simple.c | 44 ++++-----------------------------
4 files changed, 54 insertions(+), 85 deletions(-)
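From a test's point of view the change matters when a child forked with
igt_fork() opens its own drm fd: for an open request with start = end = 0
the gtt size is now computed on that fd in the child, before the query is
sent to the allocator thread. A minimal sketch of such a test (the subtest
name, sizes and number of children are illustrative, not taken from an
existing test):

#include "igt.h"
#include "intel_allocator.h"

static void child_allocations(void)
{
	/* fd opened in the child, unknown to the parent's allocator thread */
	int fd = drm_open_driver(DRIVER_INTEL);
	uint32_t handle = gem_create(fd, 4096);
	uint64_t ahnd, offset;

	/*
	 * start = end = 0: the allocator covers the whole available gtt;
	 * with this patch gem_aperture_size()/gem_uses_full_ppgtt() are
	 * called on the child's fd before the open request is sent.
	 */
	ahnd = intel_allocator_open_full(fd, 0, 0, 0,
					 INTEL_ALLOCATOR_SIMPLE,
					 ALLOC_STRATEGY_HIGH_TO_LOW);

	offset = intel_allocator_alloc(ahnd, handle, 4096, 4096);
	igt_info("child: handle %u placed at 0x%" PRIx64 "\n", handle, offset);

	intel_allocator_free(ahnd, handle);
	intel_allocator_close(ahnd);
	gem_close(fd, handle);
	close(fd);
}

igt_main
{
	igt_subtest("fork-open-full") {
		intel_allocator_multiprocess_start();

		igt_fork(child, 2)
			child_allocations();
		igt_waitchildren();

		intel_allocator_multiprocess_stop();
	}
}
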
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index 96f839d4b..a78a661b1 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -45,6 +45,12 @@ static inline const char *reqstr(enum reqtype request_type)
#define alloc_debug(...) {}
#endif
+/*
+ * We limit allocator space to avoid hang when batch would be
+ * pinned in the last page.
+ */
+#define RESERVED 4096
+
struct allocator {
int fd;
uint32_t ctx;
@@ -58,12 +64,11 @@ struct handle_entry {
struct allocator *al;
};
-struct intel_allocator *intel_allocator_reloc_create(int fd);
-struct intel_allocator *intel_allocator_random_create(int fd);
-struct intel_allocator *intel_allocator_simple_create(int fd);
+struct intel_allocator *intel_allocator_reloc_create(int fd, uint64_t end);
+struct intel_allocator *intel_allocator_random_create(int fd, uint64_t end);
struct intel_allocator *
-intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
- enum allocator_strategy strategy);
+intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
+ enum allocator_strategy strategy);
/*
* Instead of trying to find first empty handle just get new one. Assuming
@@ -286,17 +291,14 @@ static struct intel_allocator *intel_allocator_create(int fd,
"We cannot use NONE allocator\n");
break;
case INTEL_ALLOCATOR_RELOC:
- ial = intel_allocator_reloc_create(fd);
+ ial = intel_allocator_reloc_create(fd, end);
break;
case INTEL_ALLOCATOR_RANDOM:
- ial = intel_allocator_random_create(fd);
+ ial = intel_allocator_random_create(fd, end);
break;
case INTEL_ALLOCATOR_SIMPLE:
- if (!start && !end)
- ial = intel_allocator_simple_create(fd);
- else
- ial = intel_allocator_simple_create_full(fd, start, end,
- allocator_strategy);
+ ial = intel_allocator_simple_create(fd, start, end,
+ allocator_strategy);
break;
default:
igt_assert_f(ial, "Allocator type %d not implemented\n",
@@ -877,6 +879,23 @@ static uint64_t __intel_allocator_open_full(int fd, uint32_t ctx,
.open.allocator_type = allocator_type,
.open.allocator_strategy = strategy };
struct alloc_resp resp;
+ uint64_t gtt_size = gem_aperture_size(fd);
+
+ if (!start && !end) {
+ if (!gem_uses_full_ppgtt(fd))
+ gtt_size /= 2;
+ else
+ gtt_size -= RESERVED;
+
+ req.open.end = gtt_size;
+ } else {
+
+ igt_assert(end <= gtt_size);
+ if (!gem_uses_full_ppgtt(fd))
+ gtt_size /= 2;
+ igt_assert(end - start <= gtt_size);
+ }
+
/* Get child_tid only once at open() */
if (child_tid == -1)
@@ -907,6 +926,9 @@ static uint64_t __intel_allocator_open_full(int fd, uint32_t ctx,
* Returns: unique handle to the currently opened allocator.
*
* Notes:
+ *
+ * If start = end = 0, the allocator is opened for the whole available gtt.
+ *
* Strategy is generally used internally by the underlying allocator:
*
* For SIMPLE allocator:
@@ -915,7 +937,7 @@ static uint64_t __intel_allocator_open_full(int fd, uint32_t ctx,
* addresses.
*
* For RANDOM allocator:
- * - none of strategy is currently implemented.
+ * - no strategy is currently implemented.
*/
uint64_t intel_allocator_open_full(int fd, uint32_t ctx,
uint64_t start, uint64_t end,
diff --git a/lib/intel_allocator_random.c b/lib/intel_allocator_random.c
index 3d9a78f17..85ac2cf4e 100644
--- a/lib/intel_allocator_random.c
+++ b/lib/intel_allocator_random.c
@@ -10,12 +10,12 @@
#include "igt_rand.h"
#include "intel_allocator.h"
-struct intel_allocator *intel_allocator_random_create(int fd);
+struct intel_allocator *intel_allocator_random_create(int fd, uint64_t end);
struct intel_allocator_random {
uint64_t bias;
uint32_t prng;
- uint64_t gtt_size;
+ uint64_t address_mask;
uint64_t start;
uint64_t end;
@@ -23,12 +23,7 @@ struct intel_allocator_random {
uint64_t allocated_objects;
};
-static uint64_t get_bias(int fd)
-{
- (void) fd;
-
- return 256 << 10;
-}
+#define BIAS (256 << 10)
static void intel_allocator_random_get_address_range(struct intel_allocator *ial,
uint64_t *startp,
@@ -58,7 +53,7 @@ static uint64_t intel_allocator_random_alloc(struct intel_allocator *ial,
do {
offset = hars_petruska_f54_1_random64(&ialr->prng);
offset += ialr->bias; /* Keep the low 256k clear, for negative deltas */
- offset &= ialr->gtt_size - 1;
+ offset &= ialr->address_mask;
offset &= ~(alignment - 1);
} while (offset + size > ialr->end);
@@ -150,8 +145,7 @@ static bool intel_allocator_random_is_empty(struct intel_allocator *ial)
return !ialr->allocated_objects;
}
-#define RESERVED 4096
-struct intel_allocator *intel_allocator_random_create(int fd)
+struct intel_allocator *intel_allocator_random_create(int fd, uint64_t end)
{
struct intel_allocator *ial;
struct intel_allocator_random *ialr;
@@ -175,14 +169,12 @@ struct intel_allocator *intel_allocator_random_create(int fd)
ialr = ial->priv = calloc(1, sizeof(*ialr));
igt_assert(ial->priv);
ialr->prng = (uint32_t) to_user_pointer(ial);
- ialr->gtt_size = gem_aperture_size(fd);
- igt_debug("Gtt size: %" PRId64 "\n", ialr->gtt_size);
- if (!gem_uses_full_ppgtt(fd))
- ialr->gtt_size /= 2;
- ialr->bias = get_bias(fd);
+ igt_assert(BIAS < end);
+ ialr->address_mask = (1ULL << (igt_fls(end) - 1)) - 1;
+ ialr->bias = BIAS;
ialr->start = ialr->bias;
- ialr->end = ialr->gtt_size - RESERVED;
+ ialr->end = end;
ialr->allocated_objects = 0;
diff --git a/lib/intel_allocator_reloc.c b/lib/intel_allocator_reloc.c
index e8af787b0..703d054c6 100644
--- a/lib/intel_allocator_reloc.c
+++ b/lib/intel_allocator_reloc.c
@@ -10,12 +10,11 @@
#include "igt_rand.h"
#include "intel_allocator.h"
-struct intel_allocator *intel_allocator_reloc_create(int fd);
+struct intel_allocator *intel_allocator_reloc_create(int fd, uint64_t end);
struct intel_allocator_reloc {
uint64_t bias;
uint32_t prng;
- uint64_t gtt_size;
uint64_t start;
uint64_t end;
uint64_t offset;
@@ -24,12 +23,7 @@ struct intel_allocator_reloc {
uint64_t allocated_objects;
};
-static uint64_t get_bias(int fd)
-{
- (void) fd;
-
- return 256 << 10;
-}
+#define BIAS (256 << 10)
static void intel_allocator_reloc_get_address_range(struct intel_allocator *ial,
uint64_t *startp,
@@ -152,8 +146,7 @@ static bool intel_allocator_reloc_is_empty(struct intel_allocator *ial)
return !ialr->allocated_objects;
}
-#define RESERVED 4096
-struct intel_allocator *intel_allocator_reloc_create(int fd)
+struct intel_allocator *intel_allocator_reloc_create(int fd, uint64_t end)
{
struct intel_allocator *ial;
struct intel_allocator_reloc *ialr;
@@ -177,14 +170,10 @@ struct intel_allocator *intel_allocator_reloc_create(int fd)
ialr = ial->priv = calloc(1, sizeof(*ialr));
igt_assert(ial->priv);
ialr->prng = (uint32_t) to_user_pointer(ial);
- ialr->gtt_size = gem_aperture_size(fd);
- igt_debug("Gtt size: %" PRId64 "\n", ialr->gtt_size);
- if (!gem_uses_full_ppgtt(fd))
- ialr->gtt_size /= 2;
- ialr->bias = ialr->offset = get_bias(fd);
+ ialr->bias = ialr->offset = BIAS;
ialr->start = ialr->bias;
- ialr->end = ialr->gtt_size - RESERVED;
+ ialr->end = end;
ialr->allocated_objects = 0;
diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
index 963d8d257..6022e832b 100644
--- a/lib/intel_allocator_simple.c
+++ b/lib/intel_allocator_simple.c
@@ -11,17 +11,11 @@
#include "intel_bufops.h"
#include "igt_map.h"
-/*
- * We limit allocator space to avoid hang when batch would be
- * pinned in the last page.
- */
-#define RESERVED 4096
/* Avoid compilation warning */
-struct intel_allocator *intel_allocator_simple_create(int fd);
struct intel_allocator *
-intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
- enum allocator_strategy strategy);
+intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
+ enum allocator_strategy strategy);
struct simple_vma_heap {
struct igt_list_head holes;
@@ -734,9 +728,9 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
ials->allocated_objects, ials->reserved_areas);
}
-static struct intel_allocator *
-__intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
- enum allocator_strategy strategy)
+struct intel_allocator *
+intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
+ enum allocator_strategy strategy)
{
struct intel_allocator *ial;
struct intel_allocator_simple *ials;
@@ -777,31 +771,3 @@ __intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
return ial;
}
-
-struct intel_allocator *
-intel_allocator_simple_create(int fd)
-{
- uint64_t gtt_size = gem_aperture_size(fd);
-
- if (!gem_uses_full_ppgtt(fd))
- gtt_size /= 2;
- else
- gtt_size -= RESERVED;
-
- return __intel_allocator_simple_create(fd, 0, gtt_size,
- ALLOC_STRATEGY_HIGH_TO_LOW);
-}
-
-struct intel_allocator *
-intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
- enum allocator_strategy strategy)
-{
- uint64_t gtt_size = gem_aperture_size(fd);
-
- igt_assert(end <= gtt_size);
- if (!gem_uses_full_ppgtt(fd))
- gtt_size /= 2;
- igt_assert(end - start <= gtt_size);
-
- return __intel_allocator_simple_create(fd, start, end, strategy);
-}
--
2.25.1