[Intel-gfx] [PATCH i-g-t 2/2] tests/gem_exec_mem_huge.c: New test to stress eviction

Piotr Luc piotr.luc at intel.com
Tue Mar 22 17:26:48 UTC 2016


From: Piotr Luc <Piotr.Luc at intel.com>

The aim of the test is to stress the i915 memory eviction mechanism.
The test runs many processes, each of which executes a NOP batch
with a chain of really big data buffers attached.

While this is an unusual use case that doesn't occur in normal
games or applications, it is possible to imagine an offending
application exploiting the weakness of the low memory handler and
the i915 buffer eviction mechanism to conduct a DoS attack.
Note that while achieving this on a standard PC could be difficult,
it may be easy on a system with limited capabilities (little memory,
no swap, a small number of cores).

Signed-off-by: Piotr Luc <Piotr.Luc at intel.com>
---
 tests/Makefile.sources    |   1 +
 tests/gem_exec_mem_huge.c | 196 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 197 insertions(+)
 create mode 100644 tests/gem_exec_mem_huge.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 43f232f..b265f19 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -137,6 +137,7 @@ TESTS_progs = \
 	gem_ctx_thrash \
 	gem_double_irq_loop \
 	gem_exec_big \
+	gem_exec_mem_huge \
 	gem_exec_blt \
 	gem_exec_lut_handle \
 	gem_fd_exhaustion \
diff --git a/tests/gem_exec_mem_huge.c b/tests/gem_exec_mem_huge.c
new file mode 100644
index 0000000..58fb83b
--- /dev/null
+++ b/tests/gem_exec_mem_huge.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Piotr Luc <piotr.luc at intel.com>
+ *
+ */
+
+/*
+ * Testcase: run a NOP batch with a chain of really big data buffers attached.
+ *
+ * Stress-test the low memory handler and buffer eviction interaction.
+ *
+ * Expectation:
+ *   On systems with swap the test doesn't hang HW or SW; the run may take a
+ *   long time, but the test eventually finishes. On systems without swap but
+ *   with a low memory killer (like Android) the test processes should be
+ *   killed quickly while the system remains functional.
+ */
+
+#include "igt.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <sys/sysinfo.h>
+#include "drm.h"
+#include <math.h>
+
+IGT_TEST_DESCRIPTION("Run a small batch with large data BOs to stress test the"
+		     " low memory killer handling");
+
+#define MAX_PROCESSES 256
+#define MAX_DATA_BUF (2*4)
+#define DATA_BUF_SIZE (256 * 1024 * 1024/2)
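+/* With the values above each process keeps up to
+ * MAX_DATA_BUF * DATA_BUF_SIZE = 8 * 128 MiB = 1 GiB of buffer objects
+ * alive at once.
+ */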
+
+#ifdef ANDROID
+#define NUMBER_OF_INITIAL_PROCESSES 2
+#define MT_FACTOR 1
+#else
+/* Linux systems have swap memory and usually many cores, which can allow the
+ * first test process to finish before the next processes start stressing
+ * memory. A bigger number of processes makes reaching the OOM state more
+ * likely.
+ */
+#define NUMBER_OF_INITIAL_PROCESSES 2
+#define MT_FACTOR 1
+#endif
+
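+/*
+ * Submit a NOP batch that also references @count big data buffers; having
+ * to bind all of them for a single execbuf is what exercises the eviction
+ * path once memory gets tight.
+ */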
+static void exec1(int fd, uint32_t handle, uint32_t *dhandles, unsigned count)
+{
+	struct drm_i915_gem_execbuffer2 execbuf = {0};
+	struct drm_i915_gem_exec_object2 gem_exec[MAX_DATA_BUF + 1] = {0};
+	unsigned i = 0;
+
+	for (i = 0; i != count && i < MAX_DATA_BUF; ++i) {
+		gem_exec[i].handle = dhandles[i];
+	}
+	gem_exec[i].handle = handle;
+
+	execbuf.buffers_ptr = (uintptr_t)gem_exec;
+	execbuf.buffer_count = i + 1;
+	execbuf.batch_start_offset = 0;
+	execbuf.batch_len = 8; /* just BB_END */
+	execbuf.cliprects_ptr = 0;
+	execbuf.num_cliprects = 0;
+	execbuf.DR1 = 0;
+	execbuf.DR4 = 0;
+	execbuf.flags = 0;
+	i915_execbuffer2_set_context_id(execbuf, 0);
+	execbuf.rsvd2 = 0;
+
+	gem_execbuf(fd, &execbuf);
+	gem_sync(fd, handle);
+}
+
+static void run_test(unsigned pn)
+{
+	int fd;
+	uint32_t handle;
+	uint32_t batch[2] = {MI_BATCH_BUFFER_END, MI_BATCH_BUFFER_END};
+	uint32_t dhandles[MAX_DATA_BUF];
+	unsigned count = 0;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+
+	handle = gem_create(fd, 4096);
+	gem_write(fd, handle, 0, batch, sizeof(batch));
+
+	for (int i = 0; i < MAX_DATA_BUF; i++) {
+		dhandles[count++] = gem_create(fd, DATA_BUF_SIZE);
+		igt_debug("process: %u, %u data buffers allocated\n", pn, count);
+		exec1(fd, handle, dhandles, count);
+	}
+	close(fd);
+}
+
+igt_simple_main
+{
+	pid_t pid_table[MAX_PROCESSES];
+	uint64_t memsize;
+	unsigned max_processes;
+	struct sysinfo sysinf;
+	unsigned nprocs = sysconf(_SC_NPROCESSORS_ONLN);
+	int ret;
+
+	ret = sysinfo(&sysinf);
+	igt_assert(ret == 0);
+
+	/* A swap file significantly reduces the chance of hitting the OOM
+	 * state, so this test should be run on devices without swap to reach
+	 * it quickly. */
+	igt_assert(0 == intel_get_total_swap_mb());
+
+	memsize = intel_get_total_ram_mb();
+	memsize *= 1024 * 1024; /* convert MiB to bytes */
+
+	/* compute the number of processes needed to consume the available memory */
+	max_processes = NUMBER_OF_INITIAL_PROCESSES +
+			((memsize / DATA_BUF_SIZE) / MAX_DATA_BUF) * MT_FACTOR;
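+	/*
+	 * E.g. on a hypothetical swapless 4 GiB machine each process pins
+	 * about 1 GiB, so this gives 2 + 4 * MT_FACTOR = 6 processes.
+	 */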
+
+	/* However, if a device has many processors, it's possible that
+	 * some processes finish so quickly that the maximum total
+	 * memory footprint stays below the low memory trigger.
+	 * The following computation is supposed to increase the number of
+	 * test processes to obtain the required effect. */
+	if (max_processes < 2 * nprocs)
+		max_processes *= (1.0 + sqrt(nprocs + 1));
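+	/*
+	 * E.g. with 8 online CPUs the factor is 1 + sqrt(9) = 4, so the
+	 * hypothetical 6 processes from above become 24.
+	 */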
+	max_processes = min(max_processes, MAX_PROCESSES);
+
+	igt_debug("Number of processes: %u.\n", max_processes);
+	memset(pid_table, 0, sizeof(pid_table));
+
+	for (unsigned process = 0; process < max_processes; ++process) {
+		pid_t pid = fork();
+		if (pid == 0) {
+			run_test(process);
+			igt_debug("Process %u done.\n", process);
+			igt_exit();
+		} else if (pid == -1) {
+			/* fork failed; carry on with fewer test processes */
+		} else {
+			pid_table[process] = pid;
+		}
+	}
+
+	if (max_processes) {
+		unsigned children = max_processes;
+		int status = -1;
+		pid_t pid;
+
+		while (children) {
+			pid = wait(&status);
+			if (pid == -1) {
+				/* No children left to reap (e.g. after a
+				 * failed fork), stop waiting. */
+				if (errno == ECHILD)
+					break;
+				continue;
+			}
+
+			for (unsigned process = 0; process < max_processes;
+			     ++process) {
+				if (pid_table[process] != pid)
+					continue;
+
+				pid_table[process] = 0;
+				children--;
+				break;
+			}
+		}
+	}
+}
-- 
2.5.0


