[Intel-gfx] [RFT i-g-t 1/2] tests/gem_shrink: Background, direct and OOM shrinker plus userptr tests
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Mon Dec 10 10:11:06 UTC 2018
From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
...
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
lib/igt_core.c | 19 +++
lib/igt_core.h | 1 +
tests/i915/gem_shrink.c | 348 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 368 insertions(+)
diff --git a/lib/igt_core.c b/lib/igt_core.c
index 64883d6402af..a22c4b077d85 100644
--- a/lib/igt_core.c
+++ b/lib/igt_core.c
@@ -1680,6 +1680,25 @@ void igt_stop_helper(struct igt_helper_process *proc)
assert(helper_was_alive(proc, status));
}
+/**
+ * igt_try_stop_helper:
+ * @proc: #igt_helper_process structure
+ *
+ * Terminates a helper process if it is still running. Returns true if the
+ * helper was still alive when it was stopped, or false if it had already
+ * exited.
+ */
+bool igt_try_stop_helper(struct igt_helper_process *proc)
+{
+ int status;
+
+ /* failure here means the pid is already dead and so waiting is safe */
+ kill(proc->pid, proc->use_SIGKILL ? SIGKILL : SIGTERM);
+
+ status = igt_wait_helper(proc);
+
+ return helper_was_alive(proc, status);
+}
+
static void children_exit_handler(int sig)
{
int status;
diff --git a/lib/igt_core.h b/lib/igt_core.h
index 6f8c3852a686..ed5ceebf1205 100644
--- a/lib/igt_core.h
+++ b/lib/igt_core.h
@@ -795,6 +795,7 @@ bool __igt_fork_helper(struct igt_helper_process *proc);
for (; __igt_fork_helper(proc); exit(0))
int igt_wait_helper(struct igt_helper_process *proc);
void igt_stop_helper(struct igt_helper_process *proc);
+bool igt_try_stop_helper(struct igt_helper_process *proc);
/* exit handler code */
diff --git a/tests/i915/gem_shrink.c b/tests/i915/gem_shrink.c
index c8e05814ee70..145f9d35e584 100644
--- a/tests/i915/gem_shrink.c
+++ b/tests/i915/gem_shrink.c
@@ -26,6 +26,10 @@
*
* Exercise the shrinker by overallocating GEM objects
*/
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <fcntl.h>
#include "igt.h"
#include "igt_gt.h"
@@ -366,6 +370,329 @@ static void reclaim(unsigned engine, int timeout)
close(fd);
}
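+
+/*
+ * Parse a "<tag> <value> kB" field out of a /proc/meminfo snapshot and
+ * return the value converted from KiB to MiB.
+ */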
+static unsigned long get_meminfo(const char *info, const char *tag)
+{
+ const char *str;
+ unsigned long val;
+
+ str = strstr(info, tag);
+ if (str && sscanf(str + strlen(tag), " %lu", &val) == 1)
+ return val >> 10;
+
+ igt_warn("Unrecognised /proc/meminfo field: '%s'\n", tag);
+ return 0;
+}
+
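+/*
+ * Rough estimate of reclaimable memory in MiB: MemAvailable plus the
+ * buffer, page cache and swap cache figures from /proc/meminfo.
+ */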
+static unsigned long get_avail_ram_mb(void)
+{
+ int fd;
+ int ret;
+ char buf[4096];
+ unsigned long ram;
+
+ fd = open("/proc/meminfo", O_RDONLY);
+ igt_assert_fd(fd);
+
+ ret = read(fd, buf, sizeof(buf) - 1);
+ igt_assert(ret >= 0);
+ buf[ret] = '\0'; /* NUL-terminate before parsing with strstr()/sscanf() */
+
+ close(fd);
+
+ ram = get_meminfo(buf, "MemAvailable:");
+ ram += get_meminfo(buf, "Buffers:");
+ ram += get_meminfo(buf, "Cached:");
+ ram += get_meminfo(buf, "SwapCached:");
+
+ return ram;
+}
+
+struct test {
+#define TEST_BO (1)
+#define TEST_USERPTR (2)
+ unsigned int flags;
+ int fd;
+};
+
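+/*
+ * Create a GEM object, force its backing pages to be allocated and then
+ * mark it DONTNEED so the shrinker is free to purge it.
+ */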
+static uint32_t __get_pages(int fd, unsigned long alloc)
+{
+ uint32_t handle = gem_create(fd, alloc);
+
+ gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, 0);
+ gem_madvise(fd, handle, I915_MADV_DONTNEED);
+
+ return handle;
+}
+
+struct test_obj {
+ void *ptr;
+ uint32_t handle;
+};
+
+#define PAGE_SIZE 4096
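+/*
+ * Userptr flavour of __get_pages(): fault in an anonymous mapping, wrap it
+ * in a userptr object and mark it DONTNEED.
+ */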
+static void
+__get_userptr(int fd, struct test_obj *obj, unsigned long sz)
+{
+ struct local_i915_gem_userptr userptr = { };
+ void *ptr;
+
+ igt_assert_eq(sz & (PAGE_SIZE - 1), 0);
+
+ ptr = mmap(NULL, sz, PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ igt_assert(ptr != MAP_FAILED);
+
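+ /* Fault in every page so the mapping is backed by real memory. */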
+ for (size_t page = 0; page < sz; page += PAGE_SIZE)
+ *(volatile uint32_t *)((unsigned char *)ptr + page) = 0;
+
+ userptr.user_size = sz;
+ userptr.user_ptr = to_user_pointer(ptr);
+ do_ioctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &userptr);
+
+ gem_set_domain(fd, userptr.handle, I915_GEM_DOMAIN_GTT, 0);
+ gem_madvise(fd, userptr.handle, I915_MADV_DONTNEED);
+
+ obj->ptr = ptr;
+ obj->handle = userptr.handle;
+}
+
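+/*
+ * Cycle forever through a pool of 2 MiB allocations, sized from the RAM
+ * available at startup, using anonymous memory, GEM objects or userptr
+ * objects depending on flags, to keep the system under memory pressure.
+ */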
+static void *mempressure(void *arg)
+{
+ struct test_obj *list = NULL;
+ struct test *test = arg;
+ const unsigned int sz_mb = 2;
+ const unsigned int sz = sz_mb << 20;
+ unsigned int n = 0, max = 0;
+ unsigned int blocks;
+
+ igt_debug("mempressure flags=%x\n", test->flags);
+
+ for (;;) {
+ unsigned long ram_mb = get_avail_ram_mb();
+
+ if (!list) {
+ blocks = ram_mb / sz_mb;
+ list = calloc(blocks, sizeof(*list));
+ igt_assert(list);
+ } else if (ram_mb < 256) {
+ blocks = max + 1;
+ }
+
+ if (list[n].ptr || list[n].handle) {
+ if (test->flags & TEST_USERPTR) {
+ munmap(list[n].ptr, sz);
+ gem_close(test->fd, list[n].handle);
+ } else if (test->flags & TEST_BO) {
+ gem_close(test->fd, list[n].handle);
+ } else {
+ munmap(list[n].ptr, sz);
+ }
+ }
+
+ if (test->flags & TEST_BO) {
+ list[n].handle = __get_pages(test->fd, sz);
+ } else if (test->flags & TEST_USERPTR) {
+ __get_userptr(test->fd, &list[n], sz);
+ } else {
+ list[n].ptr = mmap(NULL, sz, PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ igt_assert(list[n].ptr != MAP_FAILED);
+
+ madvise(list[n].ptr, sz, MADV_HUGEPAGE);
+
+ for (size_t page = 0; page < sz; page += PAGE_SIZE)
+ *(volatile uint32_t *)((unsigned char *)list[n].ptr + page) = 0;
+ }
+
+ if (n > max)
+ max = n;
+
+ n++;
+
+ if (n >= blocks)
+ n = 0;
+ }
+
+ return NULL;
+}
+
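+/*
+ * Adjust /proc/self/oom_score_adj: -1000 exempts the process from the OOM
+ * killer, while positive values make it a preferred victim.
+ */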
+static void oom_adjust(const char *score)
+{
+ int fd;
+
+ fd = open("/proc/self/oom_score_adj", O_WRONLY);
+ igt_assert_fd(fd);
+ igt_assert(write(fd, score, strlen(score)) == strlen(score));
+ close(fd);
+}
+
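+/* SysRq 'f' asks the kernel to invoke the OOM killer immediately. */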
+static void trigger_oom(void)
+{
+ const char *cmd = "f";
+ int fd;
+
+ fd = open("/proc/sysrq-trigger", O_WRONLY);
+ igt_assert_fd(fd);
+ igt_assert(write(fd, cmd, strlen(cmd)) == strlen(cmd));
+ close(fd);
+}
+
+static bool has_sysrq_trigger(void)
+{
+ int fd;
+
+ fd = open("/proc/sysrq-trigger", O_WRONLY);
+ close(fd);
+
+ return fd >= 0;
+}
+
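+/*
+ * Run two memory hogs (one per requested backend), a busy execbuf helper
+ * and a drop-caches helper, wait for available RAM to run low and then
+ * trigger the OOM killer. The hogs may be killed; the execbuf and
+ * drop-caches helpers must survive.
+ */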
+static void reclaim_oom(unsigned int flags)
+{
+ unsigned int count = 0;
+
+ oom_adjust("-1000");
+
+ do {
+ struct igt_helper_process gem_child = { .use_SIGKILL = true };
+ struct igt_helper_process mem_child = { .use_SIGKILL = true };
+ struct igt_helper_process eb_child = { .use_SIGKILL = true };
+ struct igt_helper_process drop_child = { .use_SIGKILL = true };
+
+ igt_debug("Iteration %u...\n", ++count);
+
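+ /*
+ * First memory hog: plain anonymous memory, or GEM objects when both
+ * backends are tested at once.
+ */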
+ igt_fork_helper(&mem_child) {
+ struct test test = { };
+
+ if ((flags & (TEST_BO | TEST_USERPTR)) ==
+ (TEST_BO | TEST_USERPTR))
+ test.flags = TEST_BO;
+
+ oom_adjust("500");
+
+ if (test.flags == TEST_BO) {
+ test.fd = drm_open_driver_render(DRIVER_INTEL);
+ igt_require_gem(test.fd);
+ }
+
+ mempressure(&test);
+
+ if (test.flags == TEST_BO)
+ close(test.fd);
+ }
+
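+ /*
+ * Second memory hog: the requested backend, or userptr when both
+ * backends are tested at once.
+ */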
+ igt_fork_helper(&gem_child) {
+ struct test test = { .flags = flags };
+
+ if ((flags & (TEST_BO | TEST_USERPTR)) ==
+ (TEST_BO | TEST_USERPTR))
+ test.flags = TEST_USERPTR;
+
+ oom_adjust("500");
+
+ test.fd = drm_open_driver_render(DRIVER_INTEL);
+ igt_require_gem(test.fd);
+
+ mempressure(&test);
+
+ close(test.fd);
+ }
+
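+ /*
+ * Keep the GPU busy with a stream of trivial batches, periodically
+ * syncing and logging the submission rate.
+ */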
+ igt_fork_helper(&eb_child) {
+ struct test test = { .flags = flags };
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj = { };
+ struct drm_i915_gem_execbuffer2 execbuf = { };
+
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+
+ test.fd = drm_open_driver_render(DRIVER_INTEL);
+ igt_require_gem(test.fd);
+
+ for (;;) {
+ unsigned long eb = 0;
+ struct timespec ts = { };
+ unsigned long start;
+
+ igt_nsec_elapsed(&ts);
+ start = igt_nsec_elapsed(&ts) / 1e6;
+
+ for (;;) {
+ unsigned long now;
+
+ obj.handle = gem_create(test.fd, 4096);
+ gem_write(test.fd, obj.handle, 0, &bbe,
+ sizeof(bbe));
+ gem_execbuf(test.fd, &execbuf);
+ eb++;
+ now = igt_nsec_elapsed(&ts) / 1e6;
+ if (now > (start + 1000)) {
+ gem_sync(test.fd, obj.handle);
+ if (now > (start + 2000)) {
+ gem_close(test.fd,
+ obj.handle);
+ break;
+ }
+ }
+ gem_close(test.fd, obj.handle);
+ }
+
+ igt_debug("%lu eb\n", eb);
+ usleep(500e3);
+ }
+
+ close(test.fd);
+ }
+
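+ /* Flush active GEM objects via debugfs drop_caches every ~334ms. */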
+ igt_fork_helper(&drop_child) {
+ int fd;
+
+ fd = drm_open_driver(DRIVER_INTEL);
+ igt_require_gem(fd);
+
+ for (;;) {
+ usleep(334e3);
+ igt_drop_caches_set(fd, DROP_ACTIVE);
+ }
+
+ close(fd);
+ }
+
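+ /*
+ * Monitor available RAM until it drops below 512MiB, bailing out early
+ * if either memory hog dies. The execbuf and drop-caches helpers must
+ * remain alive throughout.
+ */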
+ for (unsigned long ram_mb = 0;
+ (ram_mb = get_avail_ram_mb()) > 512;) {
+ int status;
+ pid_t pid;
+
+ igt_debug("[%u] %lu free mb\n", count, ram_mb);
+
+ pid = waitpid(mem_child.pid, &status, WNOHANG);
+ if (pid)
+ break;
+
+ pid = waitpid(gem_child.pid, &status, WNOHANG);
+ if (pid)
+ break;
+
+ pid = waitpid(eb_child.pid, &status, WNOHANG);
+ igt_assert_eq(pid, 0);
+
+ pid = waitpid(drop_child.pid, &status, WNOHANG);
+ igt_assert_eq(pid, 0);
+
+ sleep(1);
+ }
+
+ igt_debug("Triggering OOM\n");
+ trigger_oom();
+
+ sleep(1);
+
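+ /*
+ * The memory hogs may have been reaped by the OOM killer, so only try
+ * to stop them; the other two helpers must still be running.
+ */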
+ igt_try_stop_helper(&mem_child);
+ igt_try_stop_helper(&gem_child);
+ igt_stop_helper(&eb_child);
+ igt_stop_helper(&drop_child);
+ } while (count < 3);
+}
+
igt_main
{
const struct test {
@@ -432,6 +759,27 @@ igt_main
igt_subtest("reclaim")
reclaim(I915_EXEC_DEFAULT, 2);
+ igt_subtest_group {
+ igt_fixture {
+ igt_require(has_sysrq_trigger());
+ }
+
+ igt_subtest("two-reclaims-and-oom")
+ reclaim_oom(TEST_BO);
+
+ igt_subtest("two-reclaims-and-oom-userptr") {
+ igt_require(has_userptr());
+
+ reclaim_oom(TEST_USERPTR);
+ }
+
+ igt_subtest("two-reclaims-and-oom-both") {
+ igt_require(has_userptr());
+
+ reclaim_oom(TEST_BO | TEST_USERPTR);
+ }
+ }
+
for(const struct test *t = tests; t->name; t++) {
for(const struct mode *m = modes; m->suffix; m++) {
igt_subtest_f("%s%s", t->name, m->suffix)
--
2.19.1