[PATCH i-g-t 6/7] tests/intel/xe_pxp: Termination tests

Daniele Ceraolo Spurio daniele.ceraolospurio at intel.com
Thu Dec 12 00:18:27 UTC 2024


There are several events that can cause the PXP key to be invalidated
and trigger a PXP termination (suspend, PXP termination irq). After a
termination, we expect the key to be different and the raw encrypted
data to change for the same source data.
Additionally, all PXP objects are invalidated by a termination and can
no longer be used for submission or remain mapped in a VM; we therefore
need to exercise both the exec and bind ioctls to make sure they behave
as expected after a termination.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
---
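A condensed sketch of the post-termination behavior the new subtests assert
(variable names are illustrative; the ioctl calls and the expected errors
match the ones used in xe_pxp.c below):

	/* binding a stale PXP BO with the opt-in flag set is rejected */
	ret = __xe_vm_bind(fd, vm, 0, stale_bo, 0, 0, 4096,
			   DRM_XE_VM_BIND_OP_MAP, DRM_XE_VM_BIND_FLAG_CHECK_PXP,
			   NULL, 0, 0, DEFAULT_PAT_INDEX, 0);
	igt_assert_eq(ret, -ENOEXEC);

	/* an exec on a PXP queue whose VM still maps a stale BO is rejected */
	igt_assert_eq(xe_exec_sync_failable(fd, pxp_queue, 0, NULL, 0), -ENOEXEC);

	/* a submission on a queue made stale by the termination is cancelled */
	igt_assert_eq(submit_flush_store_dw(fd, stale_queue, true, vm, dstbo, false),
		      -ECANCELED);

	/* without DRM_XE_VM_BIND_FLAG_CHECK_PXP / a PXP queue, nothing is blocked */
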
 lib/intel_batchbuffer.c |   8 +-
 lib/intel_batchbuffer.h |   1 +
 lib/xe/xe_ioctl.c       |  12 +-
 lib/xe/xe_ioctl.h       |   2 +
 tests/intel/xe_pxp.c    | 431 +++++++++++++++++++++++++++++++++++++++-
 5 files changed, 442 insertions(+), 12 deletions(-)

diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index 489e78782..a15859786 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -2416,9 +2416,9 @@ static void update_offsets(struct intel_bb *ibb,
 
 #define LINELEN 76
 
-static int
-__xe_bb_exec(struct intel_bb *ibb, uint64_t flags, bool sync)
+int __xe_bb_exec(struct intel_bb *ibb, uint64_t flags, bool sync)
 {
+	int ret = 0;
 	uint32_t engine = flags & (I915_EXEC_BSD_MASK | I915_EXEC_RING_MASK);
 	uint32_t engine_id;
 	struct drm_xe_sync syncs[2] = {
@@ -2495,12 +2495,12 @@ __xe_bb_exec(struct intel_bb *ibb, uint64_t flags, bool sync)
 	ibb->engine_syncobj = syncobj_create(ibb->fd, 0);
 	syncs[1].handle = ibb->engine_syncobj;
 
-	xe_exec_sync(ibb->fd, engine_id, ibb->batch_offset, syncs, 2);
+	ret = xe_exec_sync_failable(ibb->fd, engine_id, ibb->batch_offset, syncs, 2);
 
 	if (sync)
 		intel_bb_sync(ibb);
 
-	return 0;
+	return ret;
 }
 
 static int
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index 178aaa9d8..6a7e8df4a 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -508,6 +508,7 @@ void intel_bb_dump_cache(struct intel_bb *ibb);
 
 void intel_bb_exec(struct intel_bb *ibb, uint32_t end_offset,
 		   uint64_t flags, bool sync);
+int __xe_bb_exec(struct intel_bb *ibb, uint64_t flags, bool sync);
 
 uint64_t intel_bb_get_object_offset(struct intel_bb *ibb, uint32_t handle);
 bool intel_bb_object_offset_to_buf(struct intel_bb *ibb, struct intel_buf *buf);
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index 01ab7c758..cd7314eda 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -462,8 +462,8 @@ void xe_exec(int fd, struct drm_xe_exec *exec)
 	igt_assert_eq(__xe_exec(fd, exec), 0);
 }
 
-void xe_exec_sync(int fd, uint32_t exec_queue, uint64_t addr,
-		  struct drm_xe_sync *sync, uint32_t num_syncs)
+int xe_exec_sync_failable(int fd, uint32_t exec_queue, uint64_t addr,
+			  struct drm_xe_sync *sync, uint32_t num_syncs)
 {
 	struct drm_xe_exec exec = {
 		.exec_queue_id = exec_queue,
@@ -473,7 +473,13 @@ void xe_exec_sync(int fd, uint32_t exec_queue, uint64_t addr,
 		.num_batch_buffer = 1,
 	};
 
-	igt_assert_eq(__xe_exec(fd, &exec), 0);
+	return __xe_exec(fd, &exec);
+}
+
+void xe_exec_sync(int fd, uint32_t exec_queue, uint64_t addr,
+		  struct drm_xe_sync *sync, uint32_t num_syncs)
+{
+	igt_assert_eq(xe_exec_sync_failable(fd, exec_queue, addr, sync, num_syncs), 0);
 }
 
 void xe_exec_wait(int fd, uint32_t exec_queue, uint64_t addr)
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index c8a2d81c5..9bdf73b2b 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -91,6 +91,8 @@ int __xe_exec(int fd, struct drm_xe_exec *exec);
 void xe_exec(int fd, struct drm_xe_exec *exec);
 void xe_exec_sync(int fd, uint32_t exec_queue, uint64_t addr,
 		  struct drm_xe_sync *sync, uint32_t num_syncs);
+int xe_exec_sync_failable(int fd, uint32_t exec_queue, uint64_t addr,
+			  struct drm_xe_sync *sync, uint32_t num_syncs);
 void xe_exec_wait(int fd, uint32_t exec_queue, uint64_t addr);
 int __xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
 		     uint32_t exec_queue, int64_t *timeout);
diff --git a/tests/intel/xe_pxp.c b/tests/intel/xe_pxp.c
index 3d625af8f..7e5b45263 100644
--- a/tests/intel/xe_pxp.c
+++ b/tests/intel/xe_pxp.c
@@ -4,6 +4,7 @@
  */
 
 #include "igt.h"
+#include "igt_syncobj.h"
 #include "intel_batchbuffer.h"
 #include "intel_bufops.h"
 #include "intel_mocs.h"
@@ -81,6 +82,15 @@ static uint32_t create_pxp_rcs_queue(int fd, uint32_t vm)
 	return q;
 }
 
+static uint32_t create_regular_rcs_queue(int fd, uint32_t vm)
+{
+	struct drm_xe_engine_class_instance inst = {
+		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
+	};
+
+	return xe_exec_queue_create(fd, vm, &inst, 0);
+}
+
 static int query_pxp_status(int fd)
 {
 	struct drm_xe_query_pxp_status *pxp_query;
@@ -337,15 +347,25 @@ static void pxp_rendercopy(int fd, uint32_t q, uint32_t vm, uint32_t copy_size,
 	buf_ops_destroy(bops);
 }
 
-/**
- * SUBTEST: regular-src-to-pxp-dest-rendercopy
- * Description: copy from a regular BO to a PXP one and verify the encryption
- */
-static void test_render_regular_src_to_pxp_dest(int fd)
+static void copy_bo_cpu(int fd, uint32_t bo, uint32_t *dst, uint32_t size)
+{
+	uint32_t *src_ptr;
+
+	src_ptr = xe_bo_mmap_ext(fd, bo, size, PROT_READ);
+
+	memcpy(dst, src_ptr, size);
+
+	igt_assert_eq(munmap(src_ptr, size), 0);
+}
+
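+/*
+ * Copy from a regular BO to a PXP-protected one via rendercopy; if outpixels
+ * is provided, the raw (encrypted) contents of the destination BO are copied
+ * back to it for later comparison.
+ */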
+static void __test_render_regular_src_to_pxp_dest(int fd, uint32_t *outpixels, int outsize)
 {
 	uint32_t vm, srcbo, dstbo;
 	uint32_t q;
 
+	if (outpixels)
+		igt_assert_lte(TSTSURF_SIZE, outsize);
+
 	vm = xe_vm_create(fd, 0, 0);
 
 	/*
@@ -361,12 +381,24 @@ static void test_render_regular_src_to_pxp_dest(int fd)
 
 	check_bo_color(fd, dstbo, TSTSURF_SIZE, TSTSURF_FILLCOLOR1, false);
 
+	if (outpixels)
+		copy_bo_cpu(fd, dstbo, outpixels, TSTSURF_SIZE);
+
 	gem_close(fd, srcbo);
 	gem_close(fd, dstbo);
 	xe_exec_queue_destroy(fd, q);
 	xe_vm_destroy(fd, vm);
 }
 
+/**
+ * SUBTEST: regular-src-to-pxp-dest-rendercopy
+ * Description: copy from a regular BO to a PXP one and verify the encryption
+ */
+static void test_render_regular_src_to_pxp_dest(int fd)
+{
+	__test_render_regular_src_to_pxp_dest(fd, NULL, 0);
+}
+
 static int bocmp(int fd, uint32_t bo1, uint32_t bo2, uint32_t size)
 {
 	uint32_t *ptr1, *ptr2;
@@ -428,6 +460,379 @@ static void test_render_pxp_protsrc_to_protdest(int fd)
 	xe_vm_destroy(fd, vm);
 }
 
+#define PS_OP_TAG_LOW 0x1234fed0
+#define PS_OP_TAG_HI 0x5678cbaf
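+/*
+ * Emit a flushing PIPE_CONTROL that writes the PS_OP tag QWORD to the start
+ * of the fence buffer, so completion can later be checked by reading the tag
+ * back with the CPU.
+ */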
+static void emit_pipectrl(struct intel_bb *ibb, struct intel_buf *fenceb)
+{
+	uint32_t pipe_ctl_flags = 0;
+
+	intel_bb_out(ibb, GFX_OP_PIPE_CONTROL(2));
+	intel_bb_out(ibb, pipe_ctl_flags);
+
+	pipe_ctl_flags = (PIPE_CONTROL_FLUSH_ENABLE |
+			  PIPE_CONTROL_CS_STALL |
+			  PIPE_CONTROL_QW_WRITE);
+	intel_bb_out(ibb, GFX_OP_PIPE_CONTROL(6));
+	intel_bb_out(ibb, pipe_ctl_flags);
+
+	intel_bb_emit_reloc(ibb, fenceb->handle, 0, I915_GEM_DOMAIN_COMMAND, 0,
+			    fenceb->addr.offset);
+	intel_bb_out(ibb, PS_OP_TAG_LOW);
+	intel_bb_out(ibb, PS_OP_TAG_HI);
+	intel_bb_out(ibb, MI_NOOP);
+	intel_bb_out(ibb, MI_NOOP);
+}
+
+static void assert_pipectl_storedw_done(int fd, uint32_t bo)
+{
+	uint32_t *ptr;
+
+	ptr = xe_bo_mmap_ext(fd, bo, 4096, PROT_READ|PROT_WRITE);
+	igt_assert_eq(ptr[0], PS_OP_TAG_LOW);
+	igt_assert_eq(ptr[1], PS_OP_TAG_HI);
+
+	igt_assert(munmap(ptr, 4096) == 0);
+}
+
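+/*
+ * Submit a batch on queue q that writes the PS_OP tag to the dst BO via a
+ * flushing PIPE_CONTROL. The exec/sync result is returned instead of being
+ * asserted on, so callers can check for the errors expected after a PXP
+ * termination.
+ */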
+static int submit_flush_store_dw(int fd, uint32_t q, bool q_is_pxp, uint32_t vm,
+				 uint32_t dst, bool dst_is_pxp)
+{
+	struct intel_buf *dstbuf;
+	struct buf_ops *bops;
+	struct intel_bb *ibb;
+	int ret = 0;
+
+	bops = buf_ops_create(fd);
+	igt_assert(bops);
+
+	ibb = intel_bb_create_with_context(fd, q, vm, NULL, 4096);
+	igt_assert(ibb);
+	intel_bb_set_pxp(ibb, q_is_pxp, DISPLAY_APPTYPE, DRM_XE_PXP_HWDRM_DEFAULT_SESSION);
+
+	dstbuf = buf_create(fd, bops, dst, 256, 4, 32, 4096);
+	intel_buf_set_pxp(dstbuf, dst_is_pxp);
+
+	intel_bb_ptr_set(ibb, 0);
+	intel_bb_add_intel_buf(ibb, dstbuf, true);
+	emit_pipectrl(ibb, dstbuf);
+	intel_bb_emit_bbe(ibb);
+	ret = __xe_bb_exec(ibb, 0, false);
+	if (ret == 0)
+		ret = intel_bb_sync(ibb);
+	if (ret == 0)
+		assert_pipectl_storedw_done(fd, dst);
+
+	intel_buf_destroy(dstbuf);
+	intel_bb_destroy(ibb);
+	buf_ops_destroy(bops);
+
+	return ret;
+}
+
+static void trigger_pxp_debugfs_forced_teardown(int xe_fd)
+{
+	char str[32];
+	int ret;
+	int fd;
+
+	fd = igt_debugfs_dir(xe_fd);
+	igt_assert(fd >= 0);
+	ret = igt_debugfs_simple_read(fd, "pxp/terminate", str, sizeof(str));
+	igt_assert_f(ret >= 0, "Can't read the pxp termination debugfs\n");
+
+	/* give the kernel time to handle the termination */
+	sleep(1);
+}
+
+enum termination_type {
+	PXP_TERMINATION_IRQ,
+	PXP_TERMINATION_RPM,
+	PXP_TERMINATION_SUSPEND
+};
+
+static void trigger_termination(int fd, enum termination_type type)
+{
+	switch (type) {
+	case PXP_TERMINATION_IRQ:
+		trigger_pxp_debugfs_forced_teardown(fd);
+		break;
+	case PXP_TERMINATION_RPM:
+		igt_require(igt_wait_for_pm_status(IGT_RUNTIME_PM_STATUS_SUSPENDED));
+		break;
+	case PXP_TERMINATION_SUSPEND:
+		igt_system_suspend_autoresume(SUSPEND_STATE_MEM, SUSPEND_TEST_DEVICES);
+		break;
+	}
+}
+
+/**
+ * SUBTEST: pxp-termination-key-update-post-termination-irq
+ * Description: Verify key is changed after a termination irq
+ */
+
+/**
+ * SUBTEST: pxp-termination-key-update-post-suspend
+ * Description: Verify key is changed after a suspend/resume cycle
+ */
+
+/**
+ * SUBTEST: pxp-termination-key-update-post-rpm
+ * Description: Verify key is changed after a runtime suspend/resume cycle
+ */
+
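+/*
+ * A termination replaces the default PXP key, so encrypting the same source
+ * data before and after it must produce completely different ciphertext.
+ */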
+static void test_pxp_teardown_keychange(int fd, enum termination_type type)
+{
+	uint32_t *encrypted_data_before;
+	uint32_t *encrypted_data_after;
+	int matched_after_keychange = 0, loop = 0;
+
+	encrypted_data_before = malloc(TSTSURF_SIZE);
+	encrypted_data_after = malloc(TSTSURF_SIZE);
+	igt_assert(encrypted_data_before && encrypted_data_after);
+
+	__test_render_regular_src_to_pxp_dest(fd, encrypted_data_before, TSTSURF_SIZE);
+
+	trigger_termination(fd, type);
+
+	__test_render_regular_src_to_pxp_dest(fd, encrypted_data_after, TSTSURF_SIZE);
+
+	while (loop < (TSTSURF_SIZE/TSTSURF_BYTESPP)) {
+		if (encrypted_data_before[loop] == encrypted_data_after[loop])
+			++matched_after_keychange;
+		++loop;
+	}
+	igt_assert_eq(matched_after_keychange, 0);
+
+	free(encrypted_data_before);
+	free(encrypted_data_after);
+}
+
+/**
+ * SUBTEST: pxp-stale-bo-bind-post-termination-irq
+ * Description: verify that VM bind on a stale BO (due to a termination irq) is rejected.
+ */
+
+/**
+ * SUBTEST: pxp-stale-bo-bind-post-suspend
+ * Description: verify that VM bind on a stale BO (due to a suspend/resume cycle)
+ *              is rejected.
+ */
+
+/**
+ * SUBTEST: pxp-stale-bo-bind-post-rpm
+ * Description: verify that VM bind on a stale BO (due to a runtime suspend/resume
+ *              cycle) is rejected.
+ */
+
+static void __test_pxp_stale_bo_bind(int fd, enum termination_type type, bool pxp)
+{
+	uint32_t vm, q;
+	uint32_t dstbo;
+	uint32_t flags = pxp ? DRM_XE_VM_BIND_FLAG_CHECK_PXP : 0;
+	int ret;
+
+	vm = xe_vm_create(fd, 0, 0);
+	q = create_pxp_rcs_queue(fd, vm);
+
+	dstbo = pxp_bo_create(fd, vm, 4096, DRM_XE_PXP_TYPE_HWDRM);
+
+	igt_assert_eq(submit_flush_store_dw(fd, q, true, vm, dstbo, true), 0);
+
+	/*
+	 * RPM cleanup is automatic as soon as we release the reference, so we
+	 * need to make sure not to release it before other types of termination
+	 * have occurred.
+	 */
+	if (type == PXP_TERMINATION_RPM) {
+		xe_exec_queue_destroy(fd, q);
+		trigger_termination(fd, type);
+	} else {
+		trigger_termination(fd, type);
+		xe_exec_queue_destroy(fd, q);
+	}
+
+	/* trying to map a stale BO is an illegal op */
+	ret = __xe_vm_bind(fd, vm, 0, dstbo, 0, 0, 4096, DRM_XE_VM_BIND_OP_MAP,
+			   flags, NULL, 0, 0, DEFAULT_PAT_INDEX, 0);
+	igt_assert_eq(ret, pxp ? -ENOEXEC : 0);
+
+	gem_close(fd, dstbo);
+	xe_vm_destroy(fd, vm);
+}
+
+static void test_pxp_stale_bo_bind(int fd, enum termination_type type)
+{
+	__test_pxp_stale_bo_bind(fd, type, true);
+}
+
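+/* synchronous VM bind/unbind with the PXP check flag set, waited via syncobj */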
+static void pxp_vm_bind_sync(int fd, uint32_t vm, uint32_t bo, uint64_t addr,
+			     uint64_t size, uint32_t op)
+{
+	struct drm_xe_sync sync = {
+		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
+		.handle = syncobj_create(fd, 0),
+	};
+
+	__xe_vm_bind_assert(fd, vm, 0, bo, 0, addr, size, op,
+			    DRM_XE_VM_BIND_FLAG_CHECK_PXP, &sync, 1, 0, 0);
+
+	igt_assert(syncobj_wait(fd, &sync.handle, 1, INT64_MAX, 0, NULL));
+	syncobj_destroy(fd, sync.handle);
+}
+
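+/*
+ * Create a PXP-protected BO containing just an MI_BATCH_BUFFER_END and bind
+ * it at the given address, so it can be submitted as a trivial batch.
+ */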
+static uint32_t create_and_bind_simple_pxp_batch(int fd, uint32_t vm,
+						 uint32_t size, uint64_t addr)
+{
+	uint32_t bo;
+	uint32_t *map;
+
+	bo = pxp_bo_create(fd, vm, size, DRM_XE_PXP_TYPE_HWDRM);
+	pxp_vm_bind_sync(fd, vm, bo, addr, size, DRM_XE_VM_BIND_OP_MAP);
+
+	map = xe_bo_map(fd, bo, size);
+	*map = MI_BATCH_BUFFER_END;
+	munmap(map, size);
+
+	return bo;
+}
+
+/**
+ * SUBTEST: pxp-stale-bo-exec-post-termination-irq
+ * Description: verify that a submission using VM with a mapped stale BO (due to
+ *              a termination irq) is rejected.
+ */
+
+/**
+ * SUBTEST: pxp-stale-bo-exec-post-suspend
+ * Description: verify that a submission using VM with a mapped stale BO (due to
+ *              a suspend/resume cycle) is rejected.
+ */
+
+/**
+ * SUBTEST: pxp-stale-bo-exec-post-rpm
+ * Description: verify that a submission using VM with a mapped stale BO (due to
+ *              a runtime suspend/resume cycle) is rejected.
+ */
+
+static void __test_pxp_stale_bo_exec(int fd, enum termination_type type, bool pxp)
+{
+	uint32_t vm, q;
+	uint32_t bo;
+	int expected;
+
+	vm = xe_vm_create(fd, 0, 0);
+
+	q = create_pxp_rcs_queue(fd, vm); /* start a PXP session */
+	bo = create_and_bind_simple_pxp_batch(fd, vm, 4096, 0);
+
+	/*
+	 * RPM cleanup is automatic as soon as we release the reference, so we
+	 * need to make sure not to release it before other types of termination
+	 * have occurred.
+	 */
+	if (type == PXP_TERMINATION_RPM) {
+		xe_exec_queue_destroy(fd, q);
+		trigger_termination(fd, type);
+	} else {
+		trigger_termination(fd, type);
+		xe_exec_queue_destroy(fd, q);
+	}
+
+	/* create a clean queue using the VM with the invalid object mapped in */
+	if (pxp) {
+		q = create_pxp_rcs_queue(fd, vm);
+		expected = -ENOEXEC;
+	} else {
+		q = create_regular_rcs_queue(fd, vm);
+		expected = 0;
+	}
+
+	igt_assert_eq(xe_exec_sync_failable(fd, q, 0, NULL, 0), expected);
+
+	/* now make sure we can unmap the stale BO and have a clean exec after */
+	if (pxp) {
+		pxp_vm_bind_sync(fd, vm, 0, 0, 4096, DRM_XE_VM_BIND_OP_UNMAP);
+		gem_close(fd, bo);
+
+		bo = create_and_bind_simple_pxp_batch(fd, vm, 4096, 0);
+		igt_assert_eq(xe_exec_sync_failable(fd, q, 0, NULL, 0), 0);
+	}
+
+	xe_exec_queue_destroy(fd, q);
+	gem_close(fd, bo);
+	xe_vm_destroy(fd, vm);
+}
+
+static void test_pxp_stale_bo_exec(int fd, enum termination_type type)
+{
+	__test_pxp_stale_bo_exec(fd, type, true);
+}
+
+/**
+ * SUBTEST: pxp-stale-queue-post-termination-irq
+ * Description: verify that submissions on a stale queue (due to a termination
+ *              irq) are cancelled
+ */
+
+/**
+ * SUBTEST: pxp-stale-queue-post-suspend
+ * Description: verify that submissions on a stale queue (due to a suspend/resume
+ *              cycle) are cancelled
+ */
+
+static void test_pxp_stale_queue_execution(int fd, enum termination_type type)
+{
+	uint32_t vm, q;
+	uint32_t dstbo;
+
+	vm = xe_vm_create(fd, 0, 0);
+	q = create_pxp_rcs_queue(fd, vm);
+
+	dstbo = regular_bo_create_and_fill(fd, vm, 4096, 0);
+
+	igt_assert_eq(submit_flush_store_dw(fd, q, true, vm, dstbo, false), 0);
+
+	trigger_termination(fd, type);
+
+	/* a submission on an invalidated queue is expected to be cancelled */
+	igt_assert_eq(submit_flush_store_dw(fd, q, true, vm, dstbo, false), -ECANCELED);
+
+	gem_close(fd, dstbo);
+	xe_exec_queue_destroy(fd, q);
+	xe_vm_destroy(fd, vm);
+}
+
+/**
+ * SUBTEST: pxp-optout
+ * Description: verify that submissions with stale objects/queues are not blocked
+ *              if the user does not opt in to the PXP checks.
+ */
+static void test_pxp_optout(int fd)
+{
+	__test_pxp_stale_bo_exec(fd, PXP_TERMINATION_IRQ, false);
+	__test_pxp_stale_bo_bind(fd, PXP_TERMINATION_IRQ, false);
+}
+
+static void termination_tests(int fd, enum termination_type type, const char *tag)
+{
+	if (type == PXP_TERMINATION_RPM)
+		igt_setup_runtime_pm(fd);
+
+	igt_subtest_f("pxp-termination-key-update-post-%s", tag)
+		test_pxp_teardown_keychange(fd, type);
+	igt_subtest_f("pxp-stale-bo-bind-post-%s", tag)
+		test_pxp_stale_bo_bind(fd, type);
+	igt_subtest_f("pxp-stale-bo-exec-post-%s", tag)
+		test_pxp_stale_bo_exec(fd, type);
+
+	/* An active PXP queue holds an RPM ref, so we can't test RPM with it */
+	if (type != PXP_TERMINATION_RPM)
+		igt_subtest_f("pxp-stale-queue-post-%s", tag)
+			test_pxp_stale_queue_execution(fd, type);
+	else
+		igt_restore_runtime_pm();
+}
+
 igt_main
 {
 	int xe_fd = -1;
@@ -465,6 +870,22 @@ igt_main
 			test_render_pxp_protsrc_to_protdest(xe_fd);
 	}
 
+	igt_subtest_group {
+		igt_fixture {
+			igt_require(pxp_supported);
+			devid = intel_get_drm_devid(xe_fd);
+			igt_assert(devid);
+		}
+		igt_describe("Verify teardown management");
+
+		termination_tests(xe_fd, PXP_TERMINATION_IRQ, "termination-irq");
+		termination_tests(xe_fd, PXP_TERMINATION_SUSPEND, "suspend");
+		termination_tests(xe_fd, PXP_TERMINATION_RPM, "rpm");
+
+		igt_subtest("pxp-optout")
+			test_pxp_optout(xe_fd);
+	}
+
 	igt_fixture {
 		close(xe_fd);
 	}
-- 
2.43.0


