[PATCH 10/35] drm/amdkfd: register overlap system memory range

Felix Kuehling Felix.Kuehling at amd.com
Thu Jan 7 03:01:02 UTC 2021


From: Philip Yang <Philip.Yang at amd.com>

No overlapping range intervals [start, last] may exist in the svms
object interval tree. If a process registers a new range that overlaps
an old range, the old range is split into two ranges, depending on
whether the overlap is at the head or the tail of the old range.
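
For illustration (hypothetical page addresses), given an existing range
[0x100, 0x4ff]:

  * new [0x300, 0x6ff] overlaps the tail: the old range is split into
    [0x100, 0x2ff], which keeps its attributes, and [0x300, 0x4ff],
    which picks up the new attributes
  * new [0x000, 0x2ff] overlaps the head: the split is [0x100, 0x2ff]
    (updated) and [0x300, 0x4ff] (unchanged)
  * new [0x200, 0x3ff] lies inside the old range: a head and a tail
    split leave [0x100, 0x1ff] and [0x400, 0x4ff] unchanged around the
    updated [0x200, 0x3ff]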

Signed-off-by: Philip Yang <Philip.Yang at amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 297 ++++++++++++++++++++++++++-
 1 file changed, 294 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 02918faa70d5..ad007261f54c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -293,6 +293,278 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
 	}
 }
 
+static bool
+svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new)
+{
+	return (old->prefetch_loc == new->prefetch_loc &&
+		old->flags == new->flags &&
+		old->granularity == new->granularity);
+}
+
+static int
+svm_range_split_pages(struct svm_range *new, struct svm_range *old,
+		      uint64_t start, uint64_t last)
+{
+	unsigned long old_start;
+	dma_addr_t *pages_addr;
+	uint64_t npages;
+	uint64_t d;
+
+	old_start = old->it_node.start;
+	new->pages_addr = kvmalloc_array(new->npages,
+					 sizeof(*new->pages_addr),
+					 GFP_KERNEL | __GFP_ZERO);
+	if (!new->pages_addr)
+		return -ENOMEM;
+
+	/* Copy the slice of page addresses that belongs to the new range */
+	d = new->it_node.start - old_start;
+	memcpy(new->pages_addr, old->pages_addr + d,
+	       new->npages * sizeof(*new->pages_addr));
+
+	npages = last - start + 1;
+	pages_addr = kvmalloc_array(npages, sizeof(*pages_addr),
+				    GFP_KERNEL);
+	if (!pages_addr) {
+		kvfree(new->pages_addr);
+		new->pages_addr = NULL;
+		return -ENOMEM;
+	}
+
+	/* Keep only the page addresses that remain in the old range */
+	d = start - old_start;
+	memcpy(pages_addr, old->pages_addr + d,
+	       npages * sizeof(*pages_addr));
+
+	kvfree(old->pages_addr);
+	old->pages_addr = pages_addr;
+
+	/* Adjust the old range only after both allocations succeeded */
+	old->npages = npages;
+	old->it_node.start = start;
+	old->it_node.last = last;
+
+	return 0;
+}
+
+/**
+ * svm_range_split_adjust - split range and adjust
+ *
+ * @new: new range
+ * @old: the old range
+ * @start: the start address in pages the old range is adjusted to
+ * @last: the last address in pages the old range is adjusted to
+ *
+ * Copy the system memory page addresses (pages_addr) covered by the new
+ * range from the old range, new->npages pages starting at
+ * new->it_node.start. The remaining old range is adjusted to [start, last].
+ *
+ * Return:
+ * 0 - OK, -ENOMEM - out of memory
+ */
+static int
+svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
+		       uint64_t start, uint64_t last)
+{
+	int r = -EINVAL;
+
+	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
+		 new->svms, new->it_node.start, old->it_node.start,
+		 old->it_node.last, start, last);
+
+	if (new->it_node.start < old->it_node.start ||
+	    new->it_node.last > old->it_node.last) {
+		WARN_ONCE(1, "invalid new range start or last\n");
+		return -EINVAL;
+	}
+
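+	/* Only system memory ranges (pages_addr) can be split so far */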
+	if (old->pages_addr)
+		r = svm_range_split_pages(new, old, start, last);
+	else
+		WARN_ONCE(1, "split adjust invalid pages_addr and nodes\n");
+	if (r)
+		return r;
+
+	new->flags = old->flags;
+	new->preferred_loc = old->preferred_loc;
+	new->prefetch_loc = old->prefetch_loc;
+	new->actual_loc = old->actual_loc;
+	new->granularity = old->granularity;
+	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
+	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
+
+	return 0;
+}
+
+/**
+ * svm_range_split - split a range in 2 ranges
+ *
+ * @prange: the svm range to split
+ * @start: the remaining range start address in pages
+ * @last: the remaining range last address in pages
+ * @new: the result new range generated
+ *
+ * Two cases only:
+ * case 1: if start == prange->it_node.start
+ *         prange ==> prange[start, last]
+ *         new range [last + 1, prange->it_node.last]
+ *
+ * case 2: if last == prange->it_node.last
+ *         prange ==> prange[start, last]
+ *         new range [prange->it_node.start, start - 1]
+ *
+ * Context: The caller must hold svms->rw_sem in write mode
+ *
+ * Return:
+ * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
+ */
+static int
+svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
+		struct svm_range **new)
+{
+	uint64_t old_start = prange->it_node.start;
+	uint64_t old_last = prange->it_node.last;
+	struct svm_range_list *svms;
+	int r = 0;
+
+	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
+		 old_start, old_last, start, last);
+
+	if (old_start != start && old_last != last)
+		return -EINVAL;
+	if (start < old_start || last > old_last)
+		return -EINVAL;
+
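+	/* prange keeps [start, last]; the piece cut off becomes *new */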
+	svms = prange->svms;
+	if (old_start == start)
+		*new = svm_range_new(svms, last + 1, old_last);
+	else
+		*new = svm_range_new(svms, old_start, start - 1);
+	if (!*new)
+		return -ENOMEM;
+
+	r = svm_range_split_adjust(*new, prange, start, last);
+
+	return r;
+}
+
+static int
+svm_range_split_two(struct svm_range *prange, struct svm_range *new,
+		    uint64_t start, uint64_t last,
+		    struct list_head *insert_list,
+		    struct list_head *update_list)
+{
+	struct svm_range *tail, *tail2;
+	int r;
+
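+	/*
+	 * Split twice: prange keeps the head, tail becomes the overlapped
+	 * middle [start, last], tail2 covers the remainder.
+	 */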
+	r = svm_range_split(prange, prange->it_node.start, start - 1, &tail);
+	if (r)
+		return r;
+	r = svm_range_split(tail, start, last, &tail2);
+	if (r)
+		return r;
+	list_add(&tail2->list, insert_list);
+	list_add(&tail->list, insert_list);
+
+	if (!svm_range_is_same_attrs(prange, new))
+		list_add(&tail->update_list, update_list);
+
+	return 0;
+}
+
+static int
+svm_range_split_tail(struct svm_range *prange, struct svm_range *new,
+		     uint64_t start, struct list_head *insert_list,
+		     struct list_head *update_list)
+{
+	struct svm_range *tail;
+	int r;
+
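+	/* prange keeps the head; tail covers the overlapped [start, old_last] */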
+	r = svm_range_split(prange, prange->it_node.start, start - 1, &tail);
+	if (r)
+		return r;
+	list_add(&tail->list, insert_list);
+	if (!svm_range_is_same_attrs(prange, new))
+		list_add(&tail->update_list, update_list);
+
+	return 0;
+}
+
+static int
+svm_range_split_head(struct svm_range *prange, struct svm_range *new,
+		     uint64_t last, struct list_head *insert_list,
+		     struct list_head *update_list)
+{
+	struct svm_range *head;
+	int r;
+
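+	/* prange keeps the tail; head covers the overlapped [old_start, last] */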
+	r = svm_range_split(prange, last + 1, prange->it_node.last, &head);
+	if (r)
+		return r;
+	list_add(&head->list, insert_list);
+	if (!svm_range_is_same_attrs(prange, new))
+		list_add(&head->update_list, update_list);
+
+	return 0;
+}
+
+static int
+svm_range_split_add_front(struct svm_range *prange, struct svm_range *new,
+			  uint64_t start, uint64_t last,
+			  struct list_head *insert_list,
+			  struct list_head *update_list)
+{
+	struct svm_range *front, *tail;
+	int r = 0;
+
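+	/*
+	 * The front piece exists only in the new range, so it always
+	 * gets the new attributes.
+	 */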
+	front = svm_range_new(prange->svms, start, prange->it_node.start - 1);
+	if (!front)
+		return -ENOMEM;
+
+	list_add(&front->list, insert_list);
+	list_add(&front->update_list, update_list);
+
+	if (prange->it_node.last > last) {
+		pr_debug("split old in 2\n");
+		r = svm_range_split(prange, prange->it_node.start, last, &tail);
+		if (r)
+			return r;
+		list_add(&tail->list, insert_list);
+	}
+	if (!svm_range_is_same_attrs(prange, new))
+		list_add(&prange->update_list, update_list);
+
+	return 0;
+}
+
+static struct svm_range *svm_range_clone(struct svm_range *old)
+{
+	struct svm_range *new;
+
+	new = svm_range_new(old->svms, old->it_node.start, old->it_node.last);
+	if (!new)
+		return NULL;
+
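+	/* Duplicate the DMA address array for system memory ranges */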
+	if (old->pages_addr) {
+		new->pages_addr = kvmalloc_array(new->npages,
+						 sizeof(*new->pages_addr),
+						 GFP_KERNEL);
+		if (!new->pages_addr) {
+			kfree(new);
+			return NULL;
+		}
+		memcpy(new->pages_addr, old->pages_addr,
+		       old->npages * sizeof(*old->pages_addr));
+	}
+
+	new->flags = old->flags;
+	new->preferred_loc = old->preferred_loc;
+	new->prefetch_loc = old->prefetch_loc;
+	new->actual_loc = old->actual_loc;
+	new->granularity = old->granularity;
+	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
+	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
+
+	return new;
+}
+
 /**
  * svm_range_handle_overlap - split overlap ranges
  * @svms: svm range list header
@@ -334,15 +606,27 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
 	node = interval_tree_iter_first(&svms->objects, start, last);
 	while (node) {
 		struct interval_tree_node *next;
+		struct svm_range *old;
 
 		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
 			 node->last);
 
-		prange = container_of(node, struct svm_range, it_node);
+		old = container_of(node, struct svm_range, it_node);
 		next = interval_tree_iter_next(node, start, last);
 
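+		/* Stage all changes on a clone of the overlapped range */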
+		prange = svm_range_clone(old);
+		if (!prange) {
+			r = -ENOMEM;
+			goto out;
+		}
+
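+		/* The original is replaced by its clone, split below as needed */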
+		list_add(&old->remove_list, remove_list);
+		list_add(&prange->list, insert_list);
+
 		if (node->start < start && node->last > last) {
 			pr_debug("split in 2 ranges\n");
+			r = svm_range_split_two(prange, new, start, last,
+						insert_list, update_list);
 			start = last + 1;
 
 		} else if (node->start < start) {
@@ -352,11 +636,15 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
 			 */
 			uint64_t old_last = node->last;
 
+			pr_debug("change old range last\n");
+			r = svm_range_split_tail(prange, new, start,
+						 insert_list, update_list);
 			start = old_last + 1;
 
 		} else if (node->start == start && node->last > last) {
 			pr_debug("change old range start\n");
-
+			r = svm_range_split_head(prange, new, last,
+						 insert_list, update_list);
 			start = last + 1;
 
 		} else if (node->start == start) {
@@ -364,12 +652,15 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
 				pr_debug("found exactly same range\n");
 			else
 				pr_debug("next loop to add remaining range\n");
+			if (!svm_range_is_same_attrs(prange, new))
+				list_add(&prange->update_list, update_list);
 
 			start = node->last + 1;
 
 		} else { /* node->start > start */
 			pr_debug("add new range at front\n");
-
+			r = svm_range_split_add_front(prange, new, start, last,
+						      insert_list, update_list);
 			start = node->last + 1;
 		}
 
-- 
2.29.2


