[PATCH] drm/gpuvm: merge adjacent gpuva ranges during a map operation

Oak Zeng oak.zeng at intel.com
Wed Sep 18 16:47:40 UTC 2024


Consider this example. Before a map operation, the gpuva ranges
in a VM look like below:

 VAs | start              | range              | end                | object             | object offset
-------------------------------------------------------------------------------------------------------------
     | 0x0000000000000000 | 0x00007ffff5cd0000 | 0x00007ffff5cd0000 | 0x0000000000000000 | 0x0000000000000000
     | 0x00007ffff5cf0000 | 0x00000000000c7000 | 0x00007ffff5db7000 | 0x0000000000000000 | 0x0000000000000000

Now the user wants to map the range [0x00007ffff5cd0000 - 0x00007ffff5cf0000).
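For reference, and assuming the VAs above carry no GEM object (the
object column is 0), such a request would reach __drm_gpuvm_sm_map
through the exported helper roughly like this (driver glue elided;
gpuvm and priv are placeholders):

	drm_gpuvm_sm_map(gpuvm, priv,
			 0x00007ffff5cd0000,	/* req_addr */
			 0x0000000000020000,	/* req_range */
			 NULL,			/* req_obj */
			 0);			/* req_offset */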
With the existing code, the range walk in __drm_gpuvm_sm_map won't
find any range, so we end up with a single map operation for the range
[0x00007ffff5cd0000 - 0x00007ffff5cf0000). This results in:

 VAs | start              | range              | end                | object             | object offset
-------------------------------------------------------------------------------------------------------------
     | 0x0000000000000000 | 0x00007ffff5cd0000 | 0x00007ffff5cd0000 | 0x0000000000000000 | 0x0000000000000000
     | 0x00007ffff5cd0000 | 0x0000000000020000 | 0x00007ffff5cf0000 | 0x0000000000000000 | 0x0000000000000000
     | 0x00007ffff5cf0000 | 0x00000000000c7000 | 0x00007ffff5db7000 | 0x0000000000000000 | 0x0000000000000000
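At the callback level, since the walk finds no overlapping VA, the only
op emitted today is a single map; a sketch, with the values from the
example plugged into the internal helper:

	op_map_cb(ops, priv,
		  0x00007ffff5cd0000,	/* req_addr */
		  0x0000000000020000,	/* req_range */
		  NULL, 0);		/* req_obj, req_offset */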

The correct behavior is to merge those 3 ranges into one. So
__drm_gpuvm_sm_map is slightly modified to handle this corner case:
the walker now also finds the ranges immediately before and after the
mapping request, and merges adjacent ranges using unmap and map
operations (a callback-level sketch follows the table). With this
change, the end result of the above example is as below:

 VAs | start              | range              | end                | object             | object offset
-------------------------------------------------------------------------------------------------------------
     | 0x0000000000000000 | 0x00007ffff5db7000 | 0x00007ffff5db7000 | 0x0000000000000000 | 0x0000000000000000
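In callback terms, the widened walk now visits both neighbors, unmaps
them with merge semantics, and extends the final map to the merged
bounds. Roughly (va_lo/va_hi are placeholder names for the two
pre-existing VAs from the first table):

	op_unmap_cb(ops, priv, va_lo, true);	/* [0x0 - 0x00007ffff5cd0000) */
	op_unmap_cb(ops, priv, va_hi, true);	/* [0x00007ffff5cf0000 - 0x00007ffff5db7000) */
	op_map_cb(ops, priv,
		  0x0000000000000000,		/* merged_req_addr */
		  0x00007ffff5db7000,		/* merged_req_end - merged_req_addr */
		  NULL, 0);			/* req_obj, req_offset */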

Even though this fixes a real problem, the code looks a little ugly,
so I welcome any better fix or suggestion.

Signed-off-by: Oak Zeng <oak.zeng at intel.com>
---
 drivers/gpu/drm/drm_gpuvm.c | 62 +++++++++++++++++++++++++------------
 1 file changed, 43 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 4b6fcaea635e..51825c794bdc 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2104,28 +2104,30 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 {
 	struct drm_gpuva *va, *next;
 	u64 req_end = req_addr + req_range;
+	u64 merged_req_addr = req_addr;
+	u64 merged_req_end = req_end;
 	int ret;
 
 	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
 		return -EINVAL;
 
-	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
+	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr - 1, req_end + 1) {
 		struct drm_gem_object *obj = va->gem.obj;
 		u64 offset = va->gem.offset;
 		u64 addr = va->va.addr;
 		u64 range = va->va.range;
 		u64 end = addr + range;
-		bool merge = !!va->gem.obj;
+		bool merge;
 
 		if (addr == req_addr) {
-			merge &= obj == req_obj &&
+			merge = obj == req_obj &&
 				 offset == req_offset;
 
 			if (end == req_end) {
 				ret = op_unmap_cb(ops, priv, va, merge);
 				if (ret)
 					return ret;
-				break;
+				continue;
 			}
 
 			if (end < req_end) {
@@ -2162,22 +2164,33 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 			};
 			struct drm_gpuva_op_unmap u = { .va = va };
 
-			merge &= obj == req_obj &&
-				 offset + ls_range == req_offset;
+			merge = (obj && obj == req_obj &&
+				 offset + ls_range == req_offset) ||
+				 (!obj && !req_obj);
 			u.keep = merge;
 
 			if (end == req_end) {
 				ret = op_remap_cb(ops, priv, &p, NULL, &u);
 				if (ret)
 					return ret;
-				break;
+				continue;
 			}
 
 			if (end < req_end) {
-				ret = op_remap_cb(ops, priv, &p, NULL, &u);
-				if (ret)
-					return ret;
-				continue;
+				if (end == req_addr) {
+					if (merge) {
+						ret = op_unmap_cb(ops, priv, va, merge);
+						if (ret)
+							return ret;
+						merged_req_addr = addr;
+						continue;
+					}
+				} else {
+					ret = op_remap_cb(ops, priv, &p, NULL, &u);
+					if (ret)
+						return ret;
+					continue;
+				}
 			}
 
 			if (end > req_end) {
@@ -2195,15 +2208,16 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 				break;
 			}
 		} else if (addr > req_addr) {
-			merge &= obj == req_obj &&
+			merge = (obj && obj == req_obj &&
 				 offset == req_offset +
-					   (addr - req_addr);
+					   (addr - req_addr)) ||
+				 (!obj && !req_obj);
 
 			if (end == req_end) {
 				ret = op_unmap_cb(ops, priv, va, merge);
 				if (ret)
 					return ret;
-				break;
+				continue;
 			}
 
 			if (end < req_end) {
@@ -2225,16 +2239,26 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
 					.keep = merge,
 				};
 
-				ret = op_remap_cb(ops, priv, NULL, &n, &u);
-				if (ret)
-					return ret;
-				break;
+				if (addr == req_end) {
+					if (merge) {
+						ret = op_unmap_cb(ops, priv, va, merge);
+						if (ret)
+							return ret;
+						merged_req_end = end;
+						break;
+					}
+				} else {
+					ret = op_remap_cb(ops, priv, NULL, &n, &u);
+					if (ret)
+						return ret;
+					break;
+				}
 			}
 		}
 	}
 
 	return op_map_cb(ops, priv,
-			 req_addr, req_range,
+			 merged_req_addr, merged_req_end - merged_req_addr,
 			 req_obj, req_offset);
 }
 
-- 
2.26.3


