[PATCH v2 07/14] drm/etnaviv: add GEM submit and cmdstream validation bits

Lucas Stach l.stach at pengutronix.de
Wed Dec 9 03:48:20 PST 2015


From: The etnaviv authors <dri-devel at lists.freedesktop.org>

This adds the GEM submit functionality, together with the command
stream validation and GPU command buffer handling needed to support it.
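
A minimal userspace sketch of driving this ioctl is included below for
illustration only; it is not part of the patch. Structure and flag names
match the UAPI consumed by this code, while the DRM_ETNAVIV_GEM_SUBMIT
request index and the drmCommandWriteRead() wrapper are assumed to be
provided by the UAPI header and libdrm:

  #include <stdint.h>
  #include <xf86drm.h>
  #include <etnaviv_drm.h>

  /* Submit a stream that references one BO via a single relocation. */
  static int example_submit(int fd, uint32_t bo_handle, uint32_t *stream,
                            uint32_t stream_words, uint32_t reloc_word)
  {
          struct drm_etnaviv_gem_submit_bo bo = {
                  .handle = bo_handle,
                  .flags  = ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE,
          };
          struct drm_etnaviv_gem_submit_reloc reloc = {
                  .submit_offset = reloc_word * 4, /* bytes into the stream */
                  .reloc_idx     = 0,              /* index into the bo array */
                  .reloc_offset  = 0,              /* bytes into the BO */
          };
          struct drm_etnaviv_gem_submit req = {
                  .pipe        = ETNA_PIPE_3D,
                  .exec_state  = ETNA_PIPE_3D,
                  .nr_bos      = 1,
                  .bos         = (uintptr_t)&bo,
                  .nr_relocs   = 1,
                  .relocs      = (uintptr_t)&reloc,
                  .stream_size = stream_words * 4, /* bytes, multiple of 4 */
                  .stream      = (uintptr_t)stream,
          };
          int ret = drmCommandWriteRead(fd, DRM_ETNAVIV_GEM_SUBMIT,
                                        &req, sizeof(req));

          return ret ? ret : (int)req.fence; /* fence number on success */
  }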

Signed-off-by: Christian Gmeiner <christian.gmeiner at gmail.com>
Signed-off-by: Russell King <rmk+kernel at arm.linux.org.uk>
Signed-off-by: Lucas Stach <l.stach at pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_buffer.c     | 268 ++++++++++++++++
 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c | 209 +++++++++++++
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 438 +++++++++++++++++++++++++++
 3 files changed, 915 insertions(+)
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_buffer.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
new file mode 100644
index 000000000000..332c55ebba6d
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2014 Etnaviv Project
+ * Author: Christian Gmeiner <christian.gmeiner at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
+
+#include "common.xml.h"
+#include "state.xml.h"
+#include "cmdstream.xml.h"
+
+/*
+ * Command Buffer helper:
+ */
+
+static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
+{
+	u32 *vaddr = (u32 *)buffer->vaddr;
+
+	BUG_ON(buffer->user_size >= buffer->size);
+
+	vaddr[buffer->user_size / 4] = data;
+	buffer->user_size += 4;
+}
+
+static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
+	u32 reg, u32 value)
+{
+	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
+
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
+	/* write a register via cmd stream */
+	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
+		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
+		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
+	OUT(buffer, value);
+}
+
+static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
+{
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
+	OUT(buffer, VIV_FE_END_HEADER_OP_END);
+}
+
+static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
+{
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
+	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
+}
+
+static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
+	u16 prefetch, u32 address)
+{
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
+	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
+		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
+	OUT(buffer, address);
+}
+
+static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
+	u32 from, u32 to)
+{
+	buffer->user_size = ALIGN(buffer->user_size, 8);
+
+	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
+	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
+}
+
+static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
+{
+	u32 flush;
+	u32 stall;
+
+	/*
+	 * This assumes that if we're switching to 2D, we're switching
+	 * away from 3D, and vice versa.  Hence, if we're switching to
+	 * the 2D core, we need to flush the 3D depth and color caches,
+	 * otherwise we need to flush the 2D pixel engine cache.
+	 */
+	if (pipe == ETNA_PIPE_2D)
+		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
+	else
+		flush = VIVS_GL_FLUSH_CACHE_PE2D;
+
+	stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
+		VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
+
+	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
+	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
+
+	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+
+	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
+}
+
+static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
+{
+	return buf->paddr - gpu->memory_base;
+}
+
+static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
+	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
+{
+	u32 size = buf->size;
+	u32 *ptr = buf->vaddr + off;
+
+	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
+			ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
+
+	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+			ptr, len * 4, 0);
+}
+
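+/*
+ * Initialize the ring buffer with a WAIT/LINK loop and return its size
+ * in 64-bit command words, i.e. the FE prefetch count for this buffer.
+ */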
+u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_cmdbuf *buffer = gpu->buffer;
+
+	/* initialize buffer */
+	buffer->user_size = 0;
+
+	CMD_WAIT(buffer);
+	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
+
+	return buffer->user_size / 8;
+}
+
+void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
+{
+	struct etnaviv_cmdbuf *buffer = gpu->buffer;
+
+	/* Replace the last WAIT with an END */
+	buffer->user_size -= 16;
+
+	CMD_END(buffer);
+	mb();
+}
+
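+/*
+ * Append a user command buffer to the ring: emit any required MMU flush
+ * and pipe switch, link out to the user buffer, add the completion event
+ * and a new WAIT/LINK loop, then patch the previous WAIT into a LINK so
+ * the FE picks up the new commands.
+ */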
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
+	struct etnaviv_cmdbuf *cmdbuf)
+{
+	struct etnaviv_cmdbuf *buffer = gpu->buffer;
+	u32 *lw = buffer->vaddr + buffer->user_size - 16;
+	u32 back, link_target, link_size, reserve_size, extra_size = 0;
+
+	if (drm_debug & DRM_UT_DRIVER)
+		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+
+	/*
+	 * If we need to flush the MMU prior to submitting this buffer, we
+	 * will need to append a mmu flush load state, followed by a new
+	 * link to this buffer - a total of four additional words.
+	 */
+	if (gpu->mmu->need_flush || gpu->switch_context) {
+		/* link command */
+		extra_size += 2;
+		/* flush command */
+		if (gpu->mmu->need_flush)
+			extra_size += 2;
+		/* pipe switch commands */
+		if (gpu->switch_context)
+			extra_size += 8;
+	}
+
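+	/*
+	 * Six base command words: the event LOAD_STATE (2 words) plus the
+	 * new WAIT/LINK pair (4 words after alignment) appended below.
+	 */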
+	reserve_size = (6 + extra_size) * 4;
+
+	/*
+	 * if we are going to completely overflow the buffer, we need to wrap.
+	 */
+	if (buffer->user_size + reserve_size > buffer->size)
+		buffer->user_size = 0;
+
+	/* save offset back into main buffer */
+	back = buffer->user_size + reserve_size - 6 * 4;
+	link_target = gpu_va(gpu, buffer) + buffer->user_size;
+	link_size = 6;
+
+	/* Skip over any extra instructions */
+	link_target += extra_size * sizeof(u32);
+
+	if (drm_debug & DRM_UT_DRIVER)
+		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
+			link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
+
+	/* jump back from cmd to main buffer */
+	CMD_LINK(cmdbuf, link_size, link_target);
+
+	link_target = gpu_va(gpu, cmdbuf);
+	link_size = cmdbuf->size / 8;
+
+
+	if (drm_debug & DRM_UT_DRIVER) {
+		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+			       cmdbuf->vaddr, cmdbuf->size, 0);
+
+		pr_info("link op: %p\n", lw);
+		pr_info("link addr: %p\n", lw + 1);
+		pr_info("addr: 0x%08x\n", link_target);
+		pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
+		pr_info("event: %d\n", event);
+	}
+
+	if (gpu->mmu->need_flush || gpu->switch_context) {
+		u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;
+
+		if (gpu->mmu->need_flush) {
+			/* Add the MMU flush */
+			CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
+				       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
+				       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
+				       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
+				       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
+				       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+
+			gpu->mmu->need_flush = false;
+		}
+
+		if (gpu->switch_context) {
+			etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
+			gpu->switch_context = false;
+		}
+
+		/* And the link to the first buffer */
+		CMD_LINK(buffer, link_size, link_target);
+
+		/* Update the link target to point to above instructions */
+		link_target = new_target;
+		link_size = extra_size;
+	}
+
+	/* trigger event */
+	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
+		       VIVS_GL_EVENT_FROM_PE);
+
+	/* append WAIT/LINK to main buffer */
+	CMD_WAIT(buffer);
+	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));
+
+	/* Change WAIT into a LINK command; write the address first. */
+	*(lw + 1) = link_target;
+	mb();
+	*(lw) = VIV_FE_LINK_HEADER_OP_LINK |
+		VIV_FE_LINK_HEADER_PREFETCH(link_size);
+	mb();
+
+	if (drm_debug & DRM_UT_DRIVER)
+		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
new file mode 100644
index 000000000000..dcfd565c88d1
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+
+#include "cmdstream.xml.h"
+
+#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT)
+
+struct etna_validation_state {
+	struct etnaviv_gpu *gpu;
+	const struct drm_etnaviv_gem_submit_reloc *relocs;
+	unsigned int num_relocs;
+	u32 *start;
+};
+
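+/*
+ * States listed here are treated as sensitive: they typically take GPU
+ * addresses, so user command streams may only write them through a
+ * relocation, never with a raw value.
+ */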
+static const struct {
+	u16 offset;
+	u16 size;
+} etnaviv_sensitive_states[] __initconst = {
+#define ST(start, num) { (start) >> 2, (num) }
+	/* 2D */
+	ST(0x1200, 1),
+	ST(0x1228, 1),
+	ST(0x1238, 1),
+	ST(0x1284, 1),
+	ST(0x128c, 1),
+	ST(0x1304, 1),
+	ST(0x1310, 1),
+	ST(0x1318, 1),
+	ST(0x12800, 4),
+	ST(0x128a0, 4),
+	ST(0x128c0, 4),
+	ST(0x12970, 4),
+	ST(0x12a00, 8),
+	ST(0x12b40, 8),
+	ST(0x12b80, 8),
+	ST(0x12ce0, 8),
+	/* 3D */
+	ST(0x0644, 1),
+	ST(0x064c, 1),
+	ST(0x0680, 8),
+	ST(0x1410, 1),
+	ST(0x1430, 1),
+	ST(0x1458, 1),
+	ST(0x1460, 8),
+	ST(0x1480, 8),
+	ST(0x1500, 8),
+	ST(0x1520, 8),
+	ST(0x1608, 1),
+	ST(0x1610, 1),
+	ST(0x1658, 1),
+	ST(0x165c, 1),
+	ST(0x1664, 1),
+	ST(0x1668, 1),
+	ST(0x16a4, 1),
+	ST(0x16c0, 8),
+	ST(0x16e0, 8),
+	ST(0x1740, 8),
+	ST(0x2400, 14 * 16),
+	ST(0x10800, 32 * 16),
+#undef ST
+};
+
+#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u)
+static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE);
+
+void __init etnaviv_validate_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++)
+		bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset,
+			   etnaviv_sensitive_states[i].size);
+}
+
+static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state,
+	unsigned int buf_offset, unsigned int state_addr)
+{
+	if (state->num_relocs && state->relocs->submit_offset < buf_offset) {
+		dev_warn_once(state->gpu->dev,
+			      "%s: relocation for non-sensitive state 0x%x at offset %u\n",
+			      __func__, state_addr,
+			      state->relocs->submit_offset);
+		while (state->num_relocs &&
+		       state->relocs->submit_offset < buf_offset) {
+			state->relocs++;
+			state->num_relocs--;
+		}
+	}
+}
+
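+/*
+ * Check a LOAD_STATE command: every sensitive state it touches must be
+ * covered by a relocation entry, so the value written there is an
+ * address computed by the kernel rather than taken from userspace.
+ */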
+static bool etnaviv_validate_load_state(struct etna_validation_state *state,
+	u32 *ptr, unsigned int state_offset, unsigned int num)
+{
+	unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num);
+	unsigned int st_offset = state_offset, buf_offset;
+
+	for_each_set_bit_from(st_offset, etnaviv_states, size) {
+		buf_offset = (ptr - state->start +
+			      st_offset - state_offset) * 4;
+
+		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4);
+		if (state->num_relocs &&
+		    state->relocs->submit_offset == buf_offset) {
+			state->relocs++;
+			state->num_relocs--;
+			continue;
+		}
+
+		dev_warn_ratelimited(state->gpu->dev,
+				     "%s: load state touches restricted state 0x%x at offset %u\n",
+				     __func__, st_offset * 4, buf_offset);
+		return false;
+	}
+
+	if (state->num_relocs) {
+		buf_offset = (ptr - state->start + num) * 4;
+		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 +
+					      state->relocs->submit_offset -
+					      buf_offset);
+	}
+
+	return true;
+}
+
+static uint8_t cmd_length[32] = {
+	[FE_OPCODE_DRAW_PRIMITIVES] = 4,
+	[FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
+	[FE_OPCODE_NOP] = 2,
+	[FE_OPCODE_STALL] = 2,
+};
+
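+/*
+ * Walk the command stream and validate each command against the table
+ * of sensitive states.  @size is the stream size in 32-bit words; the
+ * relocation submit offsets are in bytes.
+ */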
+bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream,
+			      unsigned int size,
+			      struct drm_etnaviv_gem_submit_reloc *relocs,
+			      unsigned int reloc_size)
+{
+	struct etna_validation_state state;
+	u32 *buf = stream;
+	u32 *end = buf + size;
+
+	state.gpu = gpu;
+	state.relocs = relocs;
+	state.num_relocs = reloc_size;
+	state.start = stream;
+
+	while (buf < end) {
+		u32 cmd = *buf;
+		unsigned int len, n, off;
+		unsigned int op = cmd >> 27;
+
+		switch (op) {
+		case FE_OPCODE_LOAD_STATE:
+			n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
+			len = ALIGN(1 + n, 2);
+			if (buf + len > end)
+				break;
+
+			off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
+			if (!etnaviv_validate_load_state(&state, buf + 1,
+							 off, n))
+				return false;
+			break;
+
+		case FE_OPCODE_DRAW_2D:
+			n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
+			if (n == 0)
+				n = 256;
+			len = 2 + n * 2;
+			break;
+
+		default:
+			len = cmd_length[op];
+			if (len == 0) {
+				dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
+					__func__, op, buf - state.start);
+				return false;
+			}
+			break;
+		}
+
+		buf += len;
+	}
+
+	if (buf > end) {
+		dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
+			__func__, buf - state.start, size);
+		return false;
+	}
+
+	return true;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
new file mode 100644
index 000000000000..fec5ffde28b8
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/reservation.h>
+#include "etnaviv_drv.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_gem.h"
+
+/*
+ * Cmdstream submission:
+ */
+
+#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
+/* make sure these don't conflict w/ ETNA_SUBMIT_BO_x */
+#define BO_LOCKED   0x4000
+#define BO_PINNED   0x2000
+
+static inline void __user *to_user_ptr(u64 address)
+{
+	return (void __user *)(uintptr_t)address;
+}
+
+static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
+		struct etnaviv_gpu *gpu, size_t nr)
+{
+	struct etnaviv_gem_submit *submit;
+	size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
+
+	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+	if (submit) {
+		submit->dev = dev;
+		submit->gpu = gpu;
+
+		/* initially, until copy_from_user() and bo lookup succeed: */
+		submit->nr_bos = 0;
+
+		ww_acquire_init(&submit->ticket, &reservation_ww_class);
+	}
+
+	return submit;
+}
+
+static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
+	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
+	unsigned nr_bos)
+{
+	struct drm_etnaviv_gem_submit_bo *bo;
+	unsigned i;
+	int ret = 0;
+
+	spin_lock(&file->table_lock);
+
+	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
+		struct drm_gem_object *obj;
+
+		if (bo->flags & BO_INVALID_FLAGS) {
+			DRM_ERROR("invalid flags: %x\n", bo->flags);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		submit->bos[i].flags = bo->flags;
+
+		/* normally use drm_gem_object_lookup(), but for bulk lookup
+		 * all under single table_lock just hit object_idr directly:
+		 */
+		obj = idr_find(&file->object_idr, bo->handle);
+		if (!obj) {
+			DRM_ERROR("invalid handle %u at index %u\n",
+				  bo->handle, i);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		/*
+		 * Take a refcount on the object. The file table lock
+		 * prevents the object_idr's refcount on this being dropped.
+		 */
+		drm_gem_object_reference(obj);
+
+		submit->bos[i].obj = to_etnaviv_bo(obj);
+	}
+
+out_unlock:
+	submit->nr_bos = i;
+	spin_unlock(&file->table_lock);
+
+	return ret;
+}
+
+static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
+{
+	if (submit->bos[i].flags & BO_LOCKED) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+		ww_mutex_unlock(&etnaviv_obj->resv->lock);
+		submit->bos[i].flags &= ~BO_LOCKED;
+	}
+}
+
+static int submit_lock_objects(struct etnaviv_gem_submit *submit)
+{
+	int contended, slow_locked = -1, i, ret = 0;
+
+retry:
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+		if (slow_locked == i)
+			slow_locked = -1;
+
+		contended = i;
+
+		if (!(submit->bos[i].flags & BO_LOCKED)) {
+			ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
+					&submit->ticket);
+			if (ret == -EALREADY)
+				DRM_ERROR("BO at index %u already on submit list\n",
+					  i);
+			if (ret)
+				goto fail;
+			submit->bos[i].flags |= BO_LOCKED;
+		}
+	}
+
+	ww_acquire_done(&submit->ticket);
+
+	return 0;
+
+fail:
+	for (; i >= 0; i--)
+		submit_unlock_object(submit, i);
+
+	if (slow_locked > 0)
+		submit_unlock_object(submit, slow_locked);
+
+	if (ret == -EDEADLK) {
+		struct etnaviv_gem_object *etnaviv_obj;
+
+		etnaviv_obj = submit->bos[contended].obj;
+
+		/* we lost out in a seqno race, lock and retry.. */
+		ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
+				&submit->ticket);
+		if (!ret) {
+			submit->bos[contended].flags |= BO_LOCKED;
+			slow_locked = contended;
+			goto retry;
+		}
+	}
+
+	return ret;
+}
+
+static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
+{
+	unsigned int context = submit->gpu->fence_context;
+	int i, ret = 0;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+
+		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
+{
+	int i;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+		if (submit->bos[i].flags & BO_PINNED)
+			etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base);
+
+		submit->bos[i].iova = 0;
+		submit->bos[i].flags &= ~BO_PINNED;
+	}
+}
+
+static int submit_pin_objects(struct etnaviv_gem_submit *submit)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+		u32 iova;
+
+		ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base,
+					   &iova);
+		if (ret)
+			break;
+
+		submit->bos[i].flags |= BO_PINNED;
+		submit->bos[i].iova = iova;
+	}
+
+	return ret;
+}
+
+static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
+		struct etnaviv_gem_object **obj, u32 *iova)
+{
+	if (idx >= submit->nr_bos) {
+		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+				idx, submit->nr_bos);
+		return -EINVAL;
+	}
+
+	if (obj)
+		*obj = submit->bos[idx].obj;
+	if (iova)
+		*iova = submit->bos[idx].iova;
+
+	return 0;
+}
+
+/* process the relocs and patch up the cmdstream as needed: */
+static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
+		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
+		u32 nr_relocs)
+{
+	u32 i, last_offset = 0;
+	u32 *ptr = stream;
+	int ret;
+
+	for (i = 0; i < nr_relocs; i++) {
+		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
+		struct etnaviv_gem_object *bobj;
+		u32 iova, off;
+
+		if (r->submit_offset % 4) {
+			DRM_ERROR("non-aligned reloc offset: %u\n",
+				  r->submit_offset);
+			return -EINVAL;
+		}
+
+		/* offset in dwords: */
+		off = r->submit_offset / 4;
+
+		if (off >= size || off < last_offset) {
+			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
+			return -EINVAL;
+		}
+
+		ret = submit_bo(submit, r->reloc_idx, &bobj, &iova);
+		if (ret)
+			return ret;
+
+		if (r->reloc_offset >= bobj->base.size - sizeof(*ptr)) {
+			DRM_ERROR("relocation %u outside object\n", i);
+			return -EINVAL;
+		}
+
+		ptr[off] = iova + r->reloc_offset;
+
+		last_offset = off;
+	}
+
+	return 0;
+}
+
+static void submit_cleanup(struct etnaviv_gem_submit *submit)
+{
+	unsigned i;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+		submit_unlock_object(submit, i);
+		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+	}
+
+	ww_acquire_fini(&submit->ticket);
+	kfree(submit);
+}
+
+int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct etnaviv_drm_private *priv = dev->dev_private;
+	struct drm_etnaviv_gem_submit *args = data;
+	struct drm_etnaviv_gem_submit_reloc *relocs;
+	struct drm_etnaviv_gem_submit_bo *bos;
+	struct etnaviv_gem_submit *submit;
+	struct etnaviv_cmdbuf *cmdbuf;
+	struct etnaviv_gpu *gpu;
+	void *stream;
+	int ret;
+
+	if (args->pipe >= ETNA_MAX_PIPES)
+		return -EINVAL;
+
+	gpu = priv->gpu[args->pipe];
+	if (!gpu)
+		return -ENXIO;
+
+	if (args->stream_size % 4) {
+		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
+			  args->stream_size);
+		return -EINVAL;
+	}
+
+	if (args->exec_state != ETNA_PIPE_3D &&
+	    args->exec_state != ETNA_PIPE_2D &&
+	    args->exec_state != ETNA_PIPE_VG) {
+		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
+		return -EINVAL;
+	}
+
+	/*
+	 * Copy the command submission and bo array to kernel space in
+	 * one go, and do this outside of any locks.
+	 */
+	bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
+	relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
+	stream = drm_malloc_ab(1, args->stream_size);
+	cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
+					args->nr_bos);
+	if (!bos || !relocs || !stream || !cmdbuf) {
+		ret = -ENOMEM;
+		goto err_submit_cmds;
+	}
+
+	cmdbuf->exec_state = args->exec_state;
+	cmdbuf->ctx = file->driver_priv;
+
+	ret = copy_from_user(bos, to_user_ptr(args->bos),
+			     args->nr_bos * sizeof(*bos));
+	if (ret) {
+		ret = -EFAULT;
+		goto err_submit_cmds;
+	}
+
+	ret = copy_from_user(relocs, to_user_ptr(args->relocs),
+			     args->nr_relocs * sizeof(*relocs));
+	if (ret) {
+		ret = -EFAULT;
+		goto err_submit_cmds;
+	}
+
+	ret = copy_from_user(stream, to_user_ptr(args->stream),
+			     args->stream_size);
+	if (ret) {
+		ret = -EFAULT;
+		goto err_submit_cmds;
+	}
+
+	submit = submit_create(dev, gpu, args->nr_bos);
+	if (!submit) {
+		ret = -ENOMEM;
+		goto err_submit_cmds;
+	}
+
+	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
+	if (ret)
+		goto err_submit_objects;
+
+	ret = submit_lock_objects(submit);
+	if (ret)
+		goto err_submit_objects;
+
+	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
+				      relocs, args->nr_relocs)) {
+		ret = -EINVAL;
+		goto err_submit_objects;
+	}
+
+	ret = submit_fence_sync(submit);
+	if (ret)
+		goto err_submit_objects;
+
+	ret = submit_pin_objects(submit);
+	if (ret)
+		goto out;
+
+	ret = submit_reloc(submit, stream, args->stream_size / 4,
+			   relocs, args->nr_relocs);
+	if (ret)
+		goto out;
+
+	memcpy(cmdbuf->vaddr, stream, args->stream_size);
+	cmdbuf->user_size = ALIGN(args->stream_size, 8);
+
+	ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
+	if (ret == 0)
+		cmdbuf = NULL;
+
+	args->fence = submit->fence;
+
+out:
+	submit_unpin_objects(submit);
+
+	/*
+	 * If we're returning -EAGAIN, it may be due to the userptr code
+	 * wanting to run its workqueue outside of any locks. Flush our
+	 * workqueue to ensure that it is run in a timely manner.
+	 */
+	if (ret == -EAGAIN)
+		flush_workqueue(priv->wq);
+
+err_submit_objects:
+	submit_cleanup(submit);
+
+err_submit_cmds:
+	/* if we still own the cmdbuf */
+	if (cmdbuf)
+		etnaviv_gpu_cmdbuf_free(cmdbuf);
+	if (stream)
+		drm_free_large(stream);
+	if (bos)
+		drm_free_large(bos);
+	if (relocs)
+		drm_free_large(relocs);
+
+	return ret;
+}
-- 
2.6.2


