[CI v3 24/26] drm/xe: Add dma_addr res cursor

Oak Zeng oak.zeng at intel.com
Wed May 29 01:19:22 UTC 2024


From: Matthew Brost <matthew.brost at intel.com>

v1: Modify dma_addr res cursor to use new dma map API (Oak)
    With the new dma map API, we don't need a dma_address
    array to hold dma addresses. Instead, the dma address is
    pre-allocated from the iova address space. Modify the
    dma_addr res cursor for this purpose.

Signed-off-by: Matthew Brost <matthew.brost at intel.com>
Signed-off-by: Oak Zeng <oak.zeng at intel.com>
---
 drivers/gpu/drm/xe/xe_res_cursor.h | 38 +++++++++++++++++++++++++++++-
 1 file changed, 37 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
index 0a306963aa8e..f4f3c296035e 100644
--- a/drivers/gpu/drm/xe/xe_res_cursor.h
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -46,6 +46,7 @@ struct xe_res_cursor {
 	u32 mem_type;
 	struct scatterlist *sgl;
 	struct drm_buddy *mm;
+	bool dma_addr;
 };
 
 static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
@@ -71,6 +72,7 @@ static inline void xe_res_first(struct ttm_resource *res,
 				struct xe_res_cursor *cur)
 {
 	cur->sgl = NULL;
+	cur->dma_addr = false;
 	if (!res)
 		goto fallback;
 
@@ -160,6 +162,7 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
 	XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
 		   !IS_ALIGNED(size, PAGE_SIZE));
 	cur->node = NULL;
+	cur->dma_addr = false;
 	cur->start = start;
 	cur->remaining = size;
 	cur->size = 0;
@@ -168,6 +171,29 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
 	__xe_res_sg_next(cur);
 }
 
+/**
+ * xe_res_first_dma - initialize a xe_res_cursor with dma_addr
+ *
+ * @dma_addr: dma address, which is also the start of the range
+ *            to walk over
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of @size bytes starting at @dma_addr.
+ */
+static inline void xe_res_first_dma(const dma_addr_t dma_addr,
+				    u64 size, struct xe_res_cursor *cur)
+{
+	XE_WARN_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+	cur->node = NULL;
+	cur->start = dma_addr;
+	cur->remaining = size;
+	cur->sgl = NULL;
+	cur->dma_addr = true;
+	cur->mem_type = XE_PL_TT;
+}
+
 /**
  * xe_res_next - advance the cursor
  *
@@ -194,6 +220,11 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
 		return;
 	}
 
+	if (cur->dma_addr) {
+		cur->start += size;
+		return;
+	}
+
 	if (cur->sgl) {
 		cur->start += size;
 		__xe_res_sg_next(cur);
@@ -235,6 +266,11 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
  */
 static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
 {
-	return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
+	if (cur->dma_addr)
+		return cur->start;
+	else if (cur->sgl)
+		return sg_dma_address(cur->sgl) + cur->start;
+	else
+		return cur->start;
 }
 #endif
-- 
2.26.3



More information about the Intel-xe mailing list