[PATCH i-g-t 1/1] lib/intel_allocator: Use safe start offset instead of 0

Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Tue Jan 11 08:35:03 UTC 2022


Softpinning introduces an additional level of complexity regarding the
start offset and alignment. Let's use the detected safe start offset to
establish the allocator vm range, so that it covers the hw constraints
(in case the start offset cannot be 0 for some reason).

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
 lib/intel_allocator.c   |  2 +-
 lib/intel_allocator.h   | 19 ++++++++++++++-----
 lib/intel_batchbuffer.c | 12 +++++++++---
 3 files changed, 24 insertions(+), 9 deletions(-)
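
Not part of the patch -- just a minimal sketch of how the change is
consumed from a test, assuming the usual IGT helpers (igt_simple_main,
drm_open_driver, gem_create, intel_allocator_alloc/intel_allocator_free)
are available in the tree. After this patch get_simple_ahnd() opens the
allocator over [gem_detect_safe_start_offset(fd), gtt_size) instead of
[0, gtt_size), so no offset it hands out falls below the hw-safe start:

/* Illustration only -- verify the helper names against your IGT tree. */
#include "igt.h"
#include "intel_allocator.h"
#include "i915/intel_memory_region.h"

igt_simple_main
{
	int fd = drm_open_driver(DRIVER_INTEL);
	/* lowest offset the hw allows us to softpin at on this device */
	uint64_t safe_start = gem_detect_safe_start_offset(fd);
	/* opens the allocator over [safe_start, gtt_size) after this patch */
	uint64_t ahnd = get_simple_ahnd(fd, 0);
	uint32_t handle = gem_create(fd, 4096);

	if (ahnd) { /* softpin path; 0 means relocations are used instead */
		uint64_t offset = intel_allocator_alloc(ahnd, handle,
							4096, 4096);

		/* no offset below the detected safe start is handed out */
		igt_assert(offset >= safe_start);
		intel_allocator_free(ahnd, handle);
	}

	gem_close(fd, handle);
	put_ahnd(ahnd);
	close(fd);
}

With ALLOC_STRATEGY_HIGH_TO_LOW the offsets come from the top of the
range anyway; the assert only documents the new lower bound established
by gem_detect_safe_start_offset().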

diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index eabff1f9a..c28aacc03 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -892,7 +892,7 @@ static uint64_t __intel_allocator_open_full(int fd, uint32_t ctx,
 	struct alloc_resp resp;
 	uint64_t gtt_size;
 
-	if (!start && !end) {
+	if (!end) {
 		igt_assert_f(can_report_gtt_size(fd), "Invalid fd\n");
 		gtt_size = gem_aperture_size(fd);
 		if (!gem_uses_full_ppgtt(fd))
diff --git a/lib/intel_allocator.h b/lib/intel_allocator.h
index e8d807f9a..d46440a32 100644
--- a/lib/intel_allocator.h
+++ b/lib/intel_allocator.h
@@ -12,6 +12,7 @@
 #include <stdint.h>
 #include <stdatomic.h>
 #include "i915/gem_submission.h"
+#include "i915/intel_memory_region.h"
 
 /**
  * SECTION:intel_allocator
@@ -232,15 +233,19 @@ static inline uint64_t CANONICAL(uint64_t offset)
 static inline uint64_t get_simple_ahnd(int fd, uint32_t ctx)
 {
 	bool do_relocs = gem_has_relocations(fd);
+	uint64_t start = gem_detect_safe_start_offset(fd);
 
-	return do_relocs ? 0 : intel_allocator_open(fd, ctx, INTEL_ALLOCATOR_SIMPLE);
+	return do_relocs ? 0 : intel_allocator_open_full(fd, ctx, start, 0,
+							 INTEL_ALLOCATOR_SIMPLE,
+							 ALLOC_STRATEGY_HIGH_TO_LOW);
 }
 
 static inline uint64_t get_simple_l2h_ahnd(int fd, uint32_t ctx)
 {
 	bool do_relocs = gem_has_relocations(fd);
+	uint64_t start = gem_detect_safe_start_offset(fd);
 
-	return do_relocs ? 0 : intel_allocator_open_full(fd, ctx, 0, 0,
+	return do_relocs ? 0 : intel_allocator_open_full(fd, ctx, start, 0,
 							 INTEL_ALLOCATOR_SIMPLE,
 							 ALLOC_STRATEGY_LOW_TO_HIGH);
 }
@@ -248,17 +253,21 @@ static inline uint64_t get_simple_l2h_ahnd(int fd, uint32_t ctx)
 static inline uint64_t get_simple_h2l_ahnd(int fd, uint32_t ctx)
 {
 	bool do_relocs = gem_has_relocations(fd);
+	uint64_t start = gem_detect_safe_start_offset(fd);
 
-	return do_relocs ? 0 : intel_allocator_open_full(fd, ctx, 0, 0,
+	return do_relocs ? 0 : intel_allocator_open_full(fd, ctx, start, 0,
 							 INTEL_ALLOCATOR_SIMPLE,
-							 ALLOC_STRATEGY_LOW_TO_HIGH);
+							 ALLOC_STRATEGY_HIGH_TO_LOW);
 }
 
 static inline uint64_t get_reloc_ahnd(int fd, uint32_t ctx)
 {
 	bool do_relocs = gem_has_relocations(fd);
+	uint64_t start = gem_detect_safe_start_offset(fd);
 
-	return do_relocs ? 0 : intel_allocator_open(fd, ctx, INTEL_ALLOCATOR_RELOC);
+	return do_relocs ? 0 : intel_allocator_open_full(fd, ctx, start, 0,
+							 INTEL_ALLOCATOR_RELOC,
+							 ALLOC_STRATEGY_NONE);
 }
 
 static inline bool put_ahnd(uint64_t ahnd)
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 10d8a6e0c..b3dd3ab5a 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -1435,7 +1435,9 @@ struct intel_bb *intel_bb_create_with_allocator(int i915, uint32_t ctx,
 						uint32_t size,
 						uint8_t allocator_type)
 {
-	return __intel_bb_create(i915, ctx, size, false, 0, 0,
+	uint64_t start = gem_detect_safe_start_offset(i915);
+
+	return __intel_bb_create(i915, ctx, size, false, start, 0,
 				 allocator_type, ALLOC_STRATEGY_HIGH_TO_LOW);
 }
 
@@ -1467,9 +1469,11 @@ static bool aux_needs_softpin(int i915)
 struct intel_bb *intel_bb_create(int i915, uint32_t size)
 {
 	bool relocs = gem_has_relocations(i915);
+	uint64_t start = gem_detect_safe_start_offset(i915);
 
 	return __intel_bb_create(i915, 0, size,
-				 relocs && !aux_needs_softpin(i915), 0, 0,
+				 relocs && !aux_needs_softpin(i915),
+				 start, 0,
 				 INTEL_ALLOCATOR_SIMPLE,
 				 ALLOC_STRATEGY_HIGH_TO_LOW);
 }
@@ -1490,9 +1494,11 @@ struct intel_bb *
 intel_bb_create_with_context(int i915, uint32_t ctx, uint32_t size)
 {
 	bool relocs = gem_has_relocations(i915);
+	uint64_t start = gem_detect_safe_start_offset(i915);
 
 	return __intel_bb_create(i915, ctx, size,
-				 relocs && !aux_needs_softpin(i915), 0, 0,
+				 relocs && !aux_needs_softpin(i915),
+				 start, 0,
 				 INTEL_ALLOCATOR_SIMPLE,
 				 ALLOC_STRATEGY_HIGH_TO_LOW);
 }
-- 
2.32.0


