[PATCH libdrm v2 1/2] amdgpu/test: Allow BO mapping flags to be passed in tests

Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Fri Sep 28 14:45:07 UTC 2018


Add amdgpu_bo_alloc_and_map_raw(), which takes explicit VM mapping flags,
and reimplement amdgpu_bo_alloc_and_map() as a thin wrapper around it.

v2:
Call amdgpu_bo_va_op_raw directly in amdgpu_bo_alloc_and_map_raw.
Move amdgpu_bo_alloc_and_map_raw into basic_tests.c to avoid including
unistd.h in amdgpu_test.h.
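
As an illustration of the intended use (a sketch only, not part of this
patch; it assumes the usual device_handle and CUnit setup from
basic_tests.c, and AMDGPU_VM_MTYPE_UC is just one example of a mapping
flag from amdgpu_drm.h), a test can now map a BO with explicit VM flags:

    amdgpu_bo_handle bo;
    amdgpu_va_handle va_handle;
    uint64_t bo_mc;
    void *ptr;
    int r;

    /* Allocate a 4 KiB GTT BO and map it uncached in the GPU VM; the
     * MTYPE could not be expressed with amdgpu_bo_alloc_and_map(). */
    r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
                                    AMDGPU_GEM_DOMAIN_GTT, 0,
                                    AMDGPU_VM_MTYPE_UC,
                                    &bo, &ptr, &bo_mc, &va_handle);
    CU_ASSERT_EQUAL(r, 0);

    /* ... exercise the mapping ... */

    r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc, 4096);
    CU_ASSERT_EQUAL(r, 0);

Passing 0 for mapping_flags keeps the previous default mapping (readable,
writable, executable), which is exactly what the reworked
amdgpu_bo_alloc_and_map() wrapper does.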

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
---
 tests/amdgpu/amdgpu_test.h | 59 +++++++++-----------------------------------
 tests/amdgpu/basic_tests.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 72 insertions(+), 48 deletions(-)

diff --git a/tests/amdgpu/amdgpu_test.h b/tests/amdgpu/amdgpu_test.h
index 0609a74..a3830bd 100644
--- a/tests/amdgpu/amdgpu_test.h
+++ b/tests/amdgpu/amdgpu_test.h
@@ -280,7 +280,7 @@ static inline int gpu_mem_free(amdgpu_bo_handle bo,
 
 static inline int
 amdgpu_bo_alloc_wrap(amdgpu_device_handle dev, unsigned size,
-		     unsigned alignment, unsigned heap, uint64_t flags,
+		     unsigned alignment, unsigned heap, uint64_t alloc_flags,
 		     amdgpu_bo_handle *bo)
 {
 	struct amdgpu_bo_alloc_request request = {};
@@ -290,7 +290,7 @@ amdgpu_bo_alloc_wrap(amdgpu_device_handle dev, unsigned size,
 	request.alloc_size = size;
 	request.phys_alignment = alignment;
 	request.preferred_heap = heap;
-	request.flags = flags;
+	request.flags = alloc_flags;
 
 	r = amdgpu_bo_alloc(dev, &request, &buf_handle);
 	if (r)
@@ -301,57 +301,20 @@ amdgpu_bo_alloc_wrap(amdgpu_device_handle dev, unsigned size,
 	return 0;
 }
 
+int amdgpu_bo_alloc_and_map_raw(amdgpu_device_handle dev, unsigned size,
+			unsigned alignment, unsigned heap, uint64_t alloc_flags,
+			uint64_t mapping_flags, amdgpu_bo_handle *bo, void **cpu,
+			uint64_t *mc_address,
+			amdgpu_va_handle *va_handle);
+
 static inline int
 amdgpu_bo_alloc_and_map(amdgpu_device_handle dev, unsigned size,
-			unsigned alignment, unsigned heap, uint64_t flags,
+			unsigned alignment, unsigned heap, uint64_t alloc_flags,
 			amdgpu_bo_handle *bo, void **cpu, uint64_t *mc_address,
 			amdgpu_va_handle *va_handle)
 {
-	struct amdgpu_bo_alloc_request request = {};
-	amdgpu_bo_handle buf_handle;
-	amdgpu_va_handle handle;
-	uint64_t vmc_addr;
-	int r;
-
-	request.alloc_size = size;
-	request.phys_alignment = alignment;
-	request.preferred_heap = heap;
-	request.flags = flags;
-
-	r = amdgpu_bo_alloc(dev, &request, &buf_handle);
-	if (r)
-		return r;
-
-	r = amdgpu_va_range_alloc(dev,
-				  amdgpu_gpu_va_range_general,
-				  size, alignment, 0, &vmc_addr,
-				  &handle, 0);
-	if (r)
-		goto error_va_alloc;
-
-	r = amdgpu_bo_va_op(buf_handle, 0, size, vmc_addr, 0, AMDGPU_VA_OP_MAP);
-	if (r)
-		goto error_va_map;
-
-	r = amdgpu_bo_cpu_map(buf_handle, cpu);
-	if (r)
-		goto error_cpu_map;
-
-	*bo = buf_handle;
-	*mc_address = vmc_addr;
-	*va_handle = handle;
-
-	return 0;
-
-error_cpu_map:
-	amdgpu_bo_cpu_unmap(buf_handle);
-
-error_va_map:
-	amdgpu_bo_va_op(buf_handle, 0, size, vmc_addr, 0, AMDGPU_VA_OP_UNMAP);
-
-error_va_alloc:
-	amdgpu_bo_free(buf_handle);
-	return r;
+	return amdgpu_bo_alloc_and_map_raw(dev, size, alignment, heap,
+					alloc_flags, 0, bo, cpu, mc_address, va_handle);
 }
 
 static inline int
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index 1adbddd..cceffc7 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -33,6 +33,7 @@
 
 #include "amdgpu_test.h"
 #include "amdgpu_drm.h"
+#include "util_math.h"
 
 static  amdgpu_device_handle device_handle;
 static  uint32_t  major_version;
@@ -286,6 +287,66 @@ static  uint32_t shader_bin[] = {
 #define DATA_OFFSET 1024
 
 
+int amdgpu_bo_alloc_and_map_raw(amdgpu_device_handle dev, unsigned size,
+			unsigned alignment, unsigned heap, uint64_t alloc_flags,
+			uint64_t mapping_flags, amdgpu_bo_handle *bo, void **cpu,
+			uint64_t *mc_address,
+			amdgpu_va_handle *va_handle)
+{
+	struct amdgpu_bo_alloc_request request = {};
+	amdgpu_bo_handle buf_handle;
+	amdgpu_va_handle handle;
+	uint64_t vmc_addr;
+	int r;
+
+	request.alloc_size = size;
+	request.phys_alignment = alignment;
+	request.preferred_heap = heap;
+	request.flags = alloc_flags;
+
+	r = amdgpu_bo_alloc(dev, &request, &buf_handle);
+	if (r)
+		return r;
+
+	r = amdgpu_va_range_alloc(dev,
+				  amdgpu_gpu_va_range_general,
+				  size, alignment, 0, &vmc_addr,
+				  &handle, 0);
+	if (r)
+		goto error_va_alloc;
+
+	r = amdgpu_bo_va_op_raw(dev, buf_handle, 0,  ALIGN(size, getpagesize()), vmc_addr,
+				   AMDGPU_VM_PAGE_READABLE |
+				   AMDGPU_VM_PAGE_WRITEABLE |
+				   AMDGPU_VM_PAGE_EXECUTABLE |
+				   mapping_flags,
+				   AMDGPU_VA_OP_MAP);
+	if (r)
+		goto error_va_map;
+
+	r = amdgpu_bo_cpu_map(buf_handle, cpu);
+	if (r)
+		goto error_cpu_map;
+
+	*bo = buf_handle;
+	*mc_address = vmc_addr;
+	*va_handle = handle;
+
+	return 0;
+
+ error_cpu_map:
+	amdgpu_bo_cpu_unmap(buf_handle);
+
+ error_va_map:
+	amdgpu_bo_va_op(buf_handle, 0, size, vmc_addr, 0, AMDGPU_VA_OP_UNMAP);
+
+ error_va_alloc:
+	amdgpu_bo_free(buf_handle);
+	return r;
+}
+
+
+
 int suite_basic_tests_init(void)
 {
 	struct amdgpu_gpu_info gpu_info = {0};
-- 
2.7.4