[PATCH v4] drm/i915: stop using swiotlb
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Tue Jul 26 13:12:09 UTC 2022
On 25/07/2022 15:18, Robert Beckett wrote:
> Calling swiotlb functions directly is nowadays considered harmful. See
> https://lore.kernel.org/intel-gfx/20220711082614.GA29487@lst.de/
>
> Replace swiotlb_max_segment() calls with dma_max_mapping_size().
> In i915_gem_object_get_pages_internal() no longer consider max_segment
> only if CONFIG_SWIOTLB is enabled. There can be other (iommu related)
> causes of specific max segment sizes.
>
> Cc: Christoph Hellwig <hch at lst.de>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at linux.intel.com>
> Cc: Thomas Hellstrom <thomas.hellstrom at intel.com>
> Cc: Matthew Auld <matthew.auld at intel.com>
>
> v2: - restore UINT_MAX clamp in i915_sg_segment_size()
> - drop PAGE_SIZE check as it will always be >= PAGE_SIZE
> v3: - actually clamp to UINT_MAX in i915_sg_segment_size()
> v4: - round down max segment size to PAGE_SIZE
>
> Reviewed-by: Christoph Hellwig <hch at lst.de>
> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> Signed-off-by: Robert Beckett <bob.beckett at collabora.com>
> ---
> drivers/gpu/drm/i915/gem/i915_gem_internal.c | 19 ++++---------------
> drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 2 +-
> drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 4 ++--
> drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2 +-
> drivers/gpu/drm/i915/i915_scatterlist.h | 17 ++++-------------
> 5 files changed, 12 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
> index c698f95af15f..24f37658f1bb 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
> @@ -6,7 +6,6 @@
>
> #include <linux/scatterlist.h>
> #include <linux/slab.h>
> -#include <linux/swiotlb.h>
>
> #include "i915_drv.h"
> #include "i915_gem.h"
> @@ -38,22 +37,12 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
> struct scatterlist *sg;
> unsigned int sg_page_sizes;
> unsigned int npages;
> - int max_order;
> + int max_order = MAX_ORDER;
> + unsigned int max_segment;
> gfp_t gfp;
>
> - max_order = MAX_ORDER;
> -#ifdef CONFIG_SWIOTLB
> - if (is_swiotlb_active(obj->base.dev->dev)) {
> - unsigned int max_segment;
> -
> - max_segment = swiotlb_max_segment();
> - if (max_segment) {
> - max_segment = max_t(unsigned int, max_segment,
> - PAGE_SIZE) >> PAGE_SHIFT;
> - max_order = min(max_order, ilog2(max_segment));
> - }
> - }
> -#endif
> + max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT;
> + max_order = min(max_order, ilog2(max_segment));
>
> gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
> if (IS_I965GM(i915) || IS_I965G(i915)) {
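As a sanity check of the clamp here, a quick worked example (a sketch assuming 4K pages and swiotlb's usual 256 KiB max mapping size; numbers from memory, so treat them as approximate):

	i915_sg_segment_size()  -> 256 KiB
	max_segment             -> 256 KiB >> PAGE_SHIFT = 64 pages
	max_order               -> min(MAX_ORDER, ilog2(64)) = 6

And with no DMA mapping limit the helper clamps SIZE_MAX to UINT_MAX, so the ilog2() result stays above MAX_ORDER and max_order is left unchanged.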
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
> index 4eed3dd90ba8..34b9c76cd8e6 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
> @@ -194,7 +194,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
> struct intel_memory_region *mem = obj->mm.region;
> struct address_space *mapping = obj->base.filp->f_mapping;
> const unsigned long page_count = obj->base.size / PAGE_SIZE;
> - unsigned int max_segment = i915_sg_segment_size();
> + unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
> struct sg_table *st;
> struct sgt_iter sgt_iter;
> struct page *page;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> index 5a5cf332d8a5..7a828c9c0f6d 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> @@ -189,7 +189,7 @@ static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
> struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
> struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
> struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
> - const unsigned int max_segment = i915_sg_segment_size();
> + const unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
> const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
> struct file *filp = i915_tt->filp;
> struct sgt_iter sgt_iter;
> @@ -568,7 +568,7 @@ static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
> ret = sg_alloc_table_from_pages_segment(st,
> ttm->pages, ttm->num_pages,
> 0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
> - i915_sg_segment_size(), GFP_KERNEL);
> + i915_sg_segment_size(i915_tt->dev), GFP_KERNEL);
> if (ret) {
> st->sgl = NULL;
> return ERR_PTR(ret);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> index 094f06b4ce33..dfc35905dba2 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> @@ -129,7 +129,7 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
> static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
> {
> const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
> - unsigned int max_segment = i915_sg_segment_size();
> + unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
> struct sg_table *st;
> unsigned int sg_page_sizes;
> struct page **pvec;
> diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
> index 9ddb3e743a3e..1377a97a20cf 100644
> --- a/drivers/gpu/drm/i915/i915_scatterlist.h
> +++ b/drivers/gpu/drm/i915/i915_scatterlist.h
> @@ -9,7 +9,7 @@
>
> #include <linux/pfn.h>
> #include <linux/scatterlist.h>
> -#include <linux/swiotlb.h>
> +#include <linux/dma-mapping.h>
>
> #include "i915_gem.h"
>
> @@ -127,19 +127,10 @@ static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
> return page_sizes;
> }
>
> -static inline unsigned int i915_sg_segment_size(void)
> +static inline unsigned int i915_sg_segment_size(struct device *dev)
> {
> - unsigned int size = swiotlb_max_segment();
> -
> - if (size == 0)
> - size = UINT_MAX;
> -
> - size = rounddown(size, PAGE_SIZE);
> - /* swiotlb_max_segment_size can return 1 byte when it means one page. */
> - if (size < PAGE_SIZE)
> - size = PAGE_SIZE;
> -
> - return size;
> + size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev));
Hope your patience is not growing thin, but checkpatch wants a blank line here, between the declaration and the return. I could fudge it when applying though.
Otherwise it finally passed CI, yay! Now do we dig deeper to a)
understand how/where it was failing before and b) check what the possible
return values from dma_max_mapping_size() are, or do we call it done?
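For b), the core helper in kernel/dma/mapping.c is roughly the below (quoting from memory, so worth double-checking against the tree):

	size_t dma_max_mapping_size(struct device *dev)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);
		size_t size = SIZE_MAX;

		/* dma-direct path: SIZE_MAX, or the swiotlb limit if it is active */
		if (dma_map_direct(dev, ops))
			size = dma_direct_max_mapping_size(dev);
		else if (ops && ops->max_mapping_size)
			size = ops->max_mapping_size(dev);

		return size;
	}

So it looks like either SIZE_MAX when there is no limit, or whatever dma-direct/swiotlb or the IOMMU ops report, which the UINT_MAX clamp above should cover.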
Regards,
Tvrtko
> + return round_down(max, PAGE_SIZE);
> }
>
> bool i915_sg_trim(struct sg_table *orig_st);