[Intel-gfx] [PATCH i-g-t 9/9] tests/perf: Add testcase to verify association of OABUFFER reports with CS properties

Sagar Arun Kamble sagar.a.kamble at intel.com
Wed Sep 13 10:52:08 UTC 2017


Verify that reports captured from the OA buffer can be associated with
the context ID and PID sampled through the command-stream (CS) stream
properties, by running a multi-batch render workload while reading
i915-perf samples.

Signed-off-by: Sagar Arun Kamble <sagar.a.kamble at intel.com>
---
 tests/intel_perf_dapc.c | 152 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 143 insertions(+), 9 deletions(-)

diff --git a/tests/intel_perf_dapc.c b/tests/intel_perf_dapc.c
index acc027b..5d935bf 100644
--- a/tests/intel_perf_dapc.c
+++ b/tests/intel_perf_dapc.c
@@ -686,10 +686,10 @@ context_get_hw_ctx_id(int fd, unsigned int ctx)
 }
 
 static void
-perf_stream_capture_workload_samples(struct drm_i915_perf_open_param *param,
-				     uint8_t *perf_reports,
-				     int num_reports, int report_size,
-				     uint64_t *hw_ctx_id)
+__perf_stream_capture_workload_samples(struct drm_i915_perf_open_param *param,
+				       uint8_t *perf_reports,
+				       int num_reports, int report_size,
+				       uint64_t *hw_ctx_id, int num_batches)
 {
 	drm_intel_bufmgr *bufmgr;
 	drm_intel_context *context0;
@@ -701,8 +701,10 @@ perf_stream_capture_workload_samples(struct drm_i915_perf_open_param *param,
 	int stream_fd;
 	int ret;
 	bool valid_data = false;
+	int batches;
 
 retry:
+	batches = num_batches;
 	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
 	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 
@@ -724,12 +726,15 @@ retry:
 	igt_debug("opening i915-perf stream\n");
 	stream_fd = __perf_open(drm_fd, param);
 
-	render_copy(batch,
-		    context0,
-		    &src, 0, 0, width, height,
-		    &dst, 0, 0);
+	while (batches--) {
+		render_copy(batch,
+			    context0,
+			    &src, 0, 0, width, height,
+			    &dst, 0, 0);
+		usleep(1000);
 
-	intel_batchbuffer_flush_with_context(batch, context0);
+		intel_batchbuffer_flush_with_context(batch, context0);
+	}
 
 	drm_intel_bo_unreference(src.bo);
 	drm_intel_bo_unreference(dst.bo);
@@ -747,6 +752,16 @@ retry:
 	}
 }
 
+static void
+perf_stream_capture_workload_samples(struct drm_i915_perf_open_param *param,
+				     uint8_t *perf_reports,
+				     int num_reports, int report_size,
+				     uint64_t *hw_ctx_id)
+{
+	__perf_stream_capture_workload_samples(param, perf_reports, num_reports,
+					       report_size, hw_ctx_id, 1);
+}
+
 struct oa_source_sample {
 	uint64_t source;
 	uint64_t ctx_id;
@@ -1631,6 +1646,122 @@ test_concurrent_streams(void)
 	igt_assert_eq(intel_detect_and_clear_missed_interrupts(drm_fd), 0);
 }
 
+struct oa_ctxid_pid_sample {
+	uint64_t source;	/* local_I915_PERF_SAMPLE_OA_SOURCE_* (OABUFFER or CS) */
+	uint64_t ctx_id;	/* HW context ID carried in the sample */
+	uint64_t pid;		/* PID carried in the sample */
+	uint8_t oa_report[];	/* raw OA report words follow the header fields */
+};
+
+#define INVALID_CTX_ID	0xffffffff	/* sentinel; assumes HW never emits this ID -- TODO confirm */
+#define INVALID_PID	0xffffffff	/* sentinel; real pids fit in 32 bits */
+
+static void
+verify_oa_association(uint8_t *perf_reports, int num_reports, size_t report_size)
+{
+	struct oa_ctxid_pid_sample *sample;
+	uint32_t *oa_report;
+	uint64_t last_ctx_id = INVALID_CTX_ID;
+	uint64_t last_pid = INVALID_PID;
+	uint32_t last_ts = 0;
+	bool in_batch = false;
+
+	for (int i = 0; i < num_reports; i++) {
+		size_t offset = i * report_size;
+
+		sample = (struct oa_ctxid_pid_sample *) (perf_reports + offset);
+		oa_report = (uint32_t *) sample->oa_report;
+
+		igt_debug("read report: source= %s, reason = %x, "
+			  "timestamp = %x\n",
+			  SOURCE(sample->source), oa_report[0], oa_report[1]);
+
+		igt_assert(last_ts <= oa_report[1]); /* timestamps monotonic */
+		last_ts = oa_report[1];
+
+		igt_assert((sample->source ==
+			    local_I915_PERF_SAMPLE_OA_SOURCE_OABUFFER) ||
+			   (sample->source ==
+			    local_I915_PERF_SAMPLE_OA_SOURCE_CS));
+
+		if (sample->source == local_I915_PERF_SAMPLE_OA_SOURCE_CS) {
+			igt_assert(!oa_report[0]); /* CS reports: reason == 0 */
+			if (!in_batch) {
+				last_ctx_id = sample->ctx_id;
+				last_pid = sample->pid;
+				in_batch = true;
+			} else {
+				last_ctx_id = INVALID_CTX_ID;
+				last_pid = INVALID_PID;
+				in_batch = false;
+			}
+		} else {
+			igt_debug("in_batch: %s ctx_id = %" PRIu64 ", pid = %" PRIu64 ", "
+				  "sample ctx_id = %" PRIu64 ", sample pid = %" PRIu64 "\n",
+				  in_batch ? "yes" : "no", last_ctx_id,
+				  last_pid, sample->ctx_id, sample->pid);
+
+			if (in_batch)
+				igt_assert(sample->ctx_id == last_ctx_id);
+			igt_assert(sample->pid == last_pid);
+		}
+	}
+}
+
+static void
+test_oa_data_association(void)
+{
+	uint64_t oa_exp_1_msec = max_oa_exponent_for_period_lte(1000000);
+
+	uint64_t properties[] = {
+		/* Include OA reports in samples */
+		DRM_I915_PERF_PROP_SAMPLE_OA, true,
+
+		/* OA unit configuration */
+		DRM_I915_PERF_PROP_OA_METRICS_SET, test_metric_set_id,
+		DRM_I915_PERF_PROP_OA_FORMAT, test_oa_format,
+		DRM_I915_PERF_PROP_OA_EXPONENT, oa_exp_1_msec,
+
+		/* CS parameters */
+		local_DRM_I915_PERF_PROP_ENGINE, I915_EXEC_RENDER,
+		local_DRM_I915_PERF_PROP_SAMPLE_OA_SOURCE, true,
+		local_DRM_I915_PERF_PROP_SAMPLE_CTX_ID, true,
+		local_DRM_I915_PERF_PROP_SAMPLE_PID, true,
+	};
+	struct drm_i915_perf_open_param param = {
+		.flags = I915_PERF_FLAG_FD_CLOEXEC,
+		.num_properties = ARRAY_SIZE(properties) / 2, /* key/value pairs */
+		.properties_ptr = to_user_pointer(properties),
+	};
+
+	/* should be default, but just to be sure... */
+	write_u64_file("/proc/sys/dev/i915/perf_stream_paranoid", 1);
+
+	igt_fork(child, 1) {
+		int prop_size = ARRAY_SIZE(properties);
+		int num_reports = 100;
+		int report_size = get_perf_report_size(properties, prop_size,
+						       test_oa_format);
+		int total_size = num_reports * report_size;
+		uint8_t *perf_reports = malloc(total_size);
+
+		igt_assert(perf_reports);
+
+		/*
+		 * TODO: in some runs, OABUFFER periodic reports are not triggered
+		 * during execution of workload, hence increasing the execbuf
+		 * duration might be needed.
+		 */
+		__perf_stream_capture_workload_samples(&param, perf_reports,
+						       num_reports, report_size,
+						       NULL, 10);
+		verify_oa_association(perf_reports, num_reports, report_size);
+		free(perf_reports);
+	}
+
+	igt_waitchildren();
+}
+
 igt_main
 {
 	igt_skip_on_simulation();
@@ -1673,6 +1804,9 @@ igt_main
 	igt_subtest("concurrent-streams")
 		test_concurrent_streams();
 
+	igt_subtest("oa-data-association")
+		test_oa_data_association();
+
 	igt_fixture {
 		close(drm_fd);
 	}
-- 
1.9.1



More information about the Intel-gfx mailing list