[CI v3 07/26] drm: move xe_sg_segment_size to drm layer

Oak Zeng oak.zeng at intel.com
Thu May 30 00:47:13 UTC 2024


Move the xe_sg_segment_size() helper to the drm layer and rename it to
drm_gem_dma_max_sg_segment(), so that it can also be used by upcoming
drm patches. No functional changes.

Cc: Matthew Brost <matthew.brost at intel.com>
Cc: Thomas Hellström <thomas.hellstrom at intel.com>
Cc: Brian Welty <brian.welty at intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
Signed-off-by: Oak Zeng <oak.zeng at intel.com>
---
 drivers/gpu/drm/xe/xe_bo.c       |  3 ++-
 drivers/gpu/drm/xe/xe_bo.h       | 24 ------------------------
 drivers/gpu/drm/xe/xe_device.c   |  3 ++-
 drivers/gpu/drm/xe/xe_hmm.c      |  3 ++-
 include/drm/drm_gem_dma_helper.h | 25 +++++++++++++++++++++++++
 5 files changed, 31 insertions(+), 27 deletions(-)
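
For reference, here is a minimal usage sketch (not part of this patch) of how a
driver building an sg_table from an array of pages might use the new helper;
example_build_sg() is a hypothetical name, and the calls simply mirror the xe
callers converted below:

#include <drm/drm_gem_dma_helper.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_build_sg(struct device *dev, struct sg_table *st,
			    struct page **pages, unsigned int npages)
{
	/* Advertise the same per-segment limit to the DMA core. */
	dma_set_max_seg_size(dev, drm_gem_dma_max_sg_segment(dev));

	/*
	 * Cap every sg element at the helper's limit so that the padding
	 * added by iommu_dma_map_sg() cannot overflow sg->length.
	 */
	return sg_alloc_table_from_pages_segment(st, pages, npages, 0,
						 (u64)npages << PAGE_SHIFT,
						 drm_gem_dma_max_sg_segment(dev),
						 GFP_KERNEL);
}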

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 2bae01ce4e5b..d5823aab9fb8 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -9,6 +9,7 @@
 
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_managed.h>
 #include <drm/ttm/ttm_device.h>
 #include <drm/ttm/ttm_placement.h>
@@ -299,7 +300,7 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
 	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
 						num_pages, 0,
 						(u64)num_pages << PAGE_SHIFT,
-						xe_sg_segment_size(xe_tt->dev),
+						drm_gem_dma_max_sg_segment(xe_tt->dev),
 						GFP_KERNEL);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 6de894c728f5..90261c77ad13 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -289,30 +289,6 @@ void xe_bo_put_commit(struct llist_head *deferred);
 
 struct sg_table *xe_bo_sg(struct xe_bo *bo);
 
-/*
- * xe_sg_segment_size() - Provides upper limit for sg segment size.
- * @dev: device pointer
- *
- * Returns the maximum segment size for the 'struct scatterlist'
- * elements.
- */
-static inline unsigned int xe_sg_segment_size(struct device *dev)
-{
-	struct scatterlist __maybe_unused sg;
-	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;
-
-	max = min_t(size_t, max, dma_max_mapping_size(dev));
-
-	/*
-	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
-	 * cross dma segment boundary. It does so by padding some sg elements.
-	 * This can cause overflow, ending up with sg->length being set to 0.
-	 * Avoid this by ensuring maximum segment size is half of 'max'
-	 * rounded down to PAGE_SIZE.
-	 */
-	return round_down(max / 2, PAGE_SIZE);
-}
-
 #define i915_gem_object_flush_if_display(obj)		((void)(obj))
 
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index f04b11e45c2d..a6ef8a769148 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -12,6 +12,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_client.h>
 #include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_managed.h>
 #include <drm/drm_print.h>
@@ -407,7 +408,7 @@ static int xe_set_dma_info(struct xe_device *xe)
 	unsigned int mask_size = xe->info.dma_mask_size;
 	int err;
 
-	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));
+	dma_set_max_seg_size(xe->drm.dev, drm_gem_dma_max_sg_segment(xe->drm.dev));
 
 	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
 	if (err)
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 2c32dc46f7d4..f99746c4bd6b 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -3,6 +3,7 @@
  * Copyright © 2024 Intel Corporation
  */
 
+#include <drm/drm_gem_dma_helper.h>
 #include <linux/scatterlist.h>
 #include <linux/mmu_notifier.h>
 #include <linux/dma-mapping.h>
@@ -96,7 +97,7 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
 	}
 
 	ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
-						xe_sg_segment_size(dev), GFP_KERNEL);
+						drm_gem_dma_max_sg_segment(dev), GFP_KERNEL);
 	if (ret)
 		goto free_pages;
 
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
index a827bde494f6..ff7403b103ad 100644
--- a/include/drm/drm_gem_dma_helper.h
+++ b/include/drm/drm_gem_dma_helper.h
@@ -5,6 +5,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_gem.h>
+#include <linux/dma-mapping.h>
 
 struct drm_mode_create_dumb;
 
@@ -133,6 +134,30 @@ static inline int drm_gem_dma_object_mmap(struct drm_gem_object *obj, struct vm_
 	return drm_gem_dma_mmap(dma_obj, vma);
 }
 
+/*
+ * drm_gem_dma_max_sg_segment() - Provides upper limit for sg segment size.
+ * @dev: device pointer
+ *
+ * Returns the maximum segment size for the 'struct scatterlist'
+ * elements.
+ */
+static inline unsigned int drm_gem_dma_max_sg_segment(struct device *dev)
+{
+	struct scatterlist __maybe_unused sg;
+	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;
+
+	max = min_t(size_t, max, dma_max_mapping_size(dev));
+
+	/*
+	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
+	 * cross dma segment boundary. It does so by padding some sg elements.
+	 * This can cause overflow, ending up with sg->length being set to 0.
+	 * Avoid this by ensuring maximum segment size is half of 'max'
+	 * rounded down to PAGE_SIZE.
+	 */
+	return round_down(max / 2, PAGE_SIZE);
+}
+
 /*
  * Driver ops
  */
-- 
2.26.3


