[PATCH] drm/i915/gvt: batch buffer start gma address audit

Zhao Yan yan.y.zhao at intel.com
Thu Feb 8 05:17:46 UTC 2018


For every workload, record its ring buffer gma range in a gma history
list. Then, on every batch buffer start, check whether the target batch
buffer's gma range intersects any range already in the history list: if
it does, treat it as a batch buffer loop and regard the gmas as invalid;
if there is no intersection, add the new gma range to the history list.
The history list is trimmed on batch buffer end and fully released once
command scanning completes, so that one workload's gma history is
independent of another workload's.

Signed-off-by: Zhao Yan <yan.y.zhao at intel.com>
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 147 +++++++++++++++++++++++++++++++++-
 1 file changed, 145 insertions(+), 2 deletions(-)
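
A note for reviewers: the audit is essentially interval-overlap
tracking. Below is a minimal standalone model of that idea
(illustrative only; the names and the simple inclusive-bounds overlap
test here are not taken from the patch):

/* Standalone illustration, not part of the patch: flag a loop when a
 * newly scanned range overlaps any range already seen.
 */
#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };	/* inclusive bounds */

/* inclusive intervals overlap unless one lies entirely before the other */
static bool overlaps(struct range a, struct range b)
{
	return a.start <= b.end && b.start <= a.end;
}

int main(void)
{
	struct range history[8];
	int n = 0;
	struct range scanned[] = {
		{ 0x1000, 0x1fff },	/* ring buffer commands */
		{ 0x8000, 0x83ff },	/* batch buffer 1 */
		{ 0x9000, 0x90ff },	/* batch buffer 2 */
		{ 0x8100, 0x81ff },	/* jumps back into bb 1 */
	};

	for (int i = 0; i < 4; i++) {
		for (int j = 0; j < n; j++) {
			if (overlaps(scanned[i], history[j])) {
				printf("loop detected at range %d\n", i);
				return 1;
			}
		}
		history[n++] = scanned[i];
	}
	printf("no loop\n");
	return 0;
}

Built with a C99 compiler this should print "loop detected at range 3":
the jump back into batch buffer 1 is refused, which is the situation
gma_history_add_sorted reports below.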

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index c8454ac..9cc66a9 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -438,6 +438,14 @@ enum {
 	PPGTT_BUFFER
 };
 
+struct gma_history {
+	struct list_head list;
+	unsigned long gma_start;
+	unsigned long gma_end;
+	int buf_type;		/* e.g. RING_BUFFER_INSTRUCTION */
+	int buf_addr_type;	/* GTT_BUFFER or PPGTT_BUFFER */
+};
+
 struct parser_exec_state {
 	struct intel_vgpu *vgpu;
 	int ring_id;
@@ -467,6 +475,12 @@ struct parser_exec_state {
 	/* next instruction when return from 2nd batch buffer to batch buffer */
 	unsigned long ret_ip_gma_bb;
 
+	/* gma history lists, one for GGTT and one for PPGTT space.
+	 * Normally gmas increase monotonically, so a sorted list
+	 * is sufficient.
+	 */
+	struct list_head gma_history[2];
+
 	/* batch buffer address type (GTT or PPGTT)
 	 * used when ret from 2nd level batch buffer
 	 */
@@ -774,6 +788,77 @@ static inline int ip_gma_advance(struct parser_exec_state *s,
 	return 0;
 }
 
+static inline bool gma_intersect(struct gma_history *node,
+		struct gma_history *new)
+{
+	unsigned long gma_start = new->gma_start;
+	unsigned long gma_end = new->gma_end;
+
+	/* either endpoint of new falls inside node, or new fully
+	 * contains node
+	 */
+	if ((gma_start >= node->gma_start && gma_start < node->gma_end) ||
+			(gma_end > node->gma_start && gma_end <= node->gma_end) ||
+			(gma_start <= node->gma_start && gma_end >= node->gma_end))
+		return true;
+	else
+		return false;
+}
+
+static inline bool gma_history_add_sorted(struct parser_exec_state *s,
+					struct gma_history *new)
+{
+	struct intel_vgpu *vgpu = s->vgpu;
+	struct list_head *head = &s->gma_history[new->buf_addr_type];
+	struct gma_history *pos;
+
+	if (list_empty(head)) {
+		list_add(&new->list, head);
+		return true;
+	}
+
+	/* the newly added gma is most likely a little bigger than the
+	 * last added one, so do a reverse search and keep the list
+	 * incrementally sorted
+	 */
+	list_for_each_entry_reverse(pos, head, list) {
+		if (gma_intersect(pos, new)) {
+			gvt_vgpu_err("loop in bb gma %lx type %d ring %d\n",
+				new->gma_start, new->buf_addr_type, s->ring_id);
+			return false;
+		}
+
+		/* append bigger gma at tail */
+		if (new->gma_start > pos->gma_start) {
+			list_add(&new->list, &pos->list);
+			return true;
+		}
+	}
+
+	list_add(&new->list, head);
+	return true;
+}
+
+static inline void gma_history_del(struct parser_exec_state *s,
+		int buf_addr_type, int buf_type)
+{
+	struct list_head *head = &s->gma_history[buf_addr_type];
+	struct gma_history *pos, *n;
+
+	if (list_empty(head))
+		return;
+
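+	/* a buf_type of -1 matches entries of any buffer type */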
+	list_for_each_entry_safe_reverse(pos, n, head, list) {
+		if (pos->buf_type == buf_type || buf_type == -1) {
+			list_del(&pos->list);
+			kfree(pos);
+		}
+	}
+}
+
+static inline void gma_history_del_all(struct parser_exec_state *s)
+{
+	gma_history_del(s, GTT_BUFFER, -1);
+	gma_history_del(s, PPGTT_BUFFER, -1);
+}
+
 static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
 {
 	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
@@ -1091,6 +1176,7 @@ static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
 {
 	int ret;
 
+	/* batch buffer is ending: retire its gma ranges from the history */
+	gma_history_del(s, s->buf_addr_type, s->buf_type);
 	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
 		s->buf_type = BATCH_BUFFER_INSTRUCTION;
 		ret = ip_gma_set(s, s->ret_ip_gma_bb);
@@ -1608,6 +1694,7 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 	return 1;
 }
 
+/* stop scanning once a guest batch buffer claims 4 GiB or more */
+#define MAX_BATCH_BUFFER_SIZE 0xffffffff
 static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 {
 	unsigned long gma = 0;
@@ -1652,11 +1739,49 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 		cmd_len = get_cmd_length(info, cmd) << 2;
 		*bb_size += cmd_len;
 		gma += cmd_len;
+		if (*bb_size > MAX_BATCH_BUFFER_SIZE) {
+			gvt_vgpu_err("ring %d batch buffer too long\n",
+					s->ring_id);
+			return -EFAULT;
+		}
 	} while (!bb_end);
 
 	return 0;
 }
 
+/* validate a gma range and, on success, record it in the gma history;
+ * returns false on an invalid range, allocation failure or detected loop
+ */
+static bool check_gma_valid(struct parser_exec_state *s,
+		unsigned long gma, unsigned long size)
+{
+	struct intel_vgpu *vgpu = s->vgpu;
+	struct gma_history *h;
+	unsigned long gma_start = gma;
+	unsigned long gma_end = gma + size - 1;
+
+	if (gma_start == INTEL_GVT_INVALID_ADDR)
+		return false;
+
+	if (s->buf_addr_type == GTT_BUFFER &&
+			!intel_gvt_ggtt_validate_range(vgpu, gma_start, size)) {
+		gvt_vgpu_err("ring %d gma not in vgpu's allowed range\n",
+				s->ring_id);
+		return false;
+	}
+
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
+		return false;
+
+	h->gma_start = gma_start;
+	h->gma_end = gma_end;
+	h->buf_addr_type = s->buf_addr_type;
+	h->buf_type = s->buf_type;
+	if (!gma_history_add_sorted(s, h)) {
+		kfree(h);
+		return false;
+	}
+
+	return true;
+}
+
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
 	struct intel_vgpu *vgpu = s->vgpu;
@@ -1667,13 +1792,16 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
-	if (gma == INTEL_GVT_INVALID_ADDR)
-		return -EFAULT;
 
 	ret = find_bb_size(s, &bb_size);
 	if (ret)
 		return ret;
 
+	if (!check_gma_valid(s, gma, bb_size)) {
+		gvt_vgpu_err("bb start gma not valid, ring %d\n", s->ring_id);
+		return -EFAULT;
+	}
+
 	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
 	if (!bb)
 		return -ENOMEM;
@@ -2511,11 +2639,24 @@ static int command_scan(struct parser_exec_state *s,
 	unsigned long gma_head, gma_tail, gma_bottom;
 	int ret = 0;
 	struct intel_vgpu *vgpu = s->vgpu;
+	struct gma_history *h;
 
 	gma_head = rb_start + rb_head;
 	gma_tail = rb_start + rb_tail;
 	gma_bottom = rb_start +  rb_len;
 
+	INIT_LIST_HEAD(&s->gma_history[GTT_BUFFER]);
+	INIT_LIST_HEAD(&s->gma_history[PPGTT_BUFFER]);
+
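+	/* seed the history with this workload's ring buffer range so a
+	 * batch buffer jumping back into the ring is caught as a loop
+	 */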
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+	h->gma_start = gma_head;
+	h->gma_end = gma_tail;
+	h->buf_addr_type = GTT_BUFFER;
+	h->buf_type = RING_BUFFER_INSTRUCTION;
+	gma_history_add_sorted(s, h);
+
 	while (s->ip_gma != gma_tail) {
 		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
 			if (!(s->ip_gma >= rb_start) ||
@@ -2544,6 +2685,8 @@ static int command_scan(struct parser_exec_state *s,
 		}
 	}
 
+	/* scan complete: release this workload's entire gma history */
+	gma_history_del_all(s);
+
 	return ret;
 }
 
-- 
1.9.1
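
A closing note on the insertion strategy: gma_history_add_sorted
searches from the tail because gmas usually arrive in increasing order,
so the common-case insertion costs a single comparison. A standalone
sketch of the same strategy over a plain array (illustrative only, not
from the patch):

/* Standalone illustration, not part of the patch. */
#include <stdio.h>

/* insert 'v' into the sorted array 'a' of length '*n', searching from
 * the tail: when inputs are mostly increasing this takes one step.
 */
static void insert_sorted_reverse(unsigned long *a, int *n, unsigned long v)
{
	int i = *n;

	while (i > 0 && a[i - 1] > v) {	/* walk back past larger entries */
		a[i] = a[i - 1];
		i--;
	}
	a[i] = v;
	(*n)++;
}

int main(void)
{
	unsigned long starts[8];
	int n = 0;
	/* mostly increasing gma starts, one out-of-order entry */
	unsigned long in[] = { 0x1000, 0x8000, 0x9000, 0x8800 };

	for (int i = 0; i < 4; i++)
		insert_sorted_reverse(starts, &n, in[i]);

	for (int i = 0; i < n; i++)
		printf("0x%lx\n", starts[i]);	/* sorted ascending */
	return 0;
}

The in-order entries are each placed after a single tail comparison,
and the out-of-order entry 0x8800 still lands in its sorted position.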


