[PATCH v4 6/7] drm/gpuvm: Add DRM_GPUVA_REPEAT flag and logic

Boris Brezillon boris.brezillon at collabora.com
Thu Aug 21 12:29:46 UTC 2025


On Mon, 07 Jul 2025 21:33:13 +0200
"Danilo Krummrich" <dakr at kernel.org> wrote:

> On Mon Jul 7, 2025 at 7:04 PM CEST, Caterina Shablia wrote:
> > From: Asahi Lina <lina+kernel at asahilina.net>
> >
> > To be able to support "fake sparse" mappings without relying on GPU page
> > fault handling, drivers may need to create large (e.g. 4GiB) mappings of
> > the same page repeatedly (or the same range of pages). Doing this through
> > individual mappings would be very wasteful. This can be handled better
> > by using a flag on map creation, but to do it safely, drm_gpuvm needs to
> > be aware of this special case.
> >
> > Add a flag that signals that a given mapping is a page mapping, which is
> > repeated over the entire requested VA range. This tweaks the
> > sm_map() logic to treat the GEM offsets differently when mappings are
> > repeated ones, so they are not incremented as they would be with regular
> > mappings.
> >
> > The size of the GEM portion to repeat is passed through
> > drm_gpuva::gem::range. Most of the time it will be the page size, but
> > it can be bigger as long as it's less than drm_gpuva::va::range, and
> > drm_gpuva::gem::range is a multiple of drm_gpuva::va::range.  
> 
> Should be "as long as it's less than drm_gpuva::va::range, and
> drm_gpuva::va::range is a multiple of drm_gpuva::gem::range".
> 
> I also think this feature deserves its own section in the global GPUVM
> documentation -- please add a corresponding paragraph.

Sure.
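
Roughly, I'm thinking of an example along these lines for the new
section (just a sketch; it assumes the drm_gpuvm_map_req object
introduced earlier in this series and the matching drm_gpuvm_sm_map()
signature):

	/* Map 4GiB of VA to a single 4KiB dummy page, repeated over the
	 * whole range. va.range must be a multiple of gem.range.
	 */
	struct drm_gpuvm_map_req req = {
		.va.addr = fake_sparse_start,	/* hypothetical VA */
		.va.range = SZ_4G,
		.gem.obj = dummy_bo,		/* hypothetical dummy BO */
		.gem.offset = 0,
		.gem.range = SZ_4K,
		.flags = DRM_GPUVA_REPEAT,
	};

	ret = drm_gpuvm_sm_map(gpuvm, priv, &req);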

> 
> > +static int check_map_req(struct drm_gpuvm *gpuvm,  
> 
> Let's call this validate_map_request().

I can do that, sure.

> 
> > +			 const struct drm_gpuvm_map_req *req)
> > +{
> > +	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req->va.addr, req->va.range)))
> > +		return -EINVAL;
> > +
> > +	if (req->flags & DRM_GPUVA_REPEAT) {
> > +		u64 va_range = req->va.range;
> > +
> > +		/* For a repeated mapping, the GEM range must be > 0 and
> > +		 * the VA range must be a multiple of the GEM range.
> > +		 */
> > +		if (unlikely(!req->gem.range ||
> > +			     va_range < req->gem.range ||
> > +			     do_div(va_range, req->gem.range)))
> > +			return -EINVAL;
> > +	}
> > +
> > +	return 0;
> > +}
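
FWIW, for the 4GiB "fake sparse" case this check boils down to:
va_range = SZ_4G and gem.range = SZ_4K, so gem.range is non-zero,
va_range is not smaller than gem.range, and do_div(va_range, gem.range)
returns a zero remainder, hence the request is accepted.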
> > +
> >  static int
> >  __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> >  		   const struct drm_gpuvm_ops *ops, void *priv,
> > @@ -2137,6 +2179,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> >  	struct drm_gpuva reqva = {
> >  		.va.addr = req->va.addr,
> >  		.va.range = req->va.range,
> > +		.gem.range = req->gem.range,
> >  		.gem.offset = req->gem.offset,
> >  		.gem.obj = req->gem.obj,
> >  		.flags = req->flags,
> > @@ -2144,7 +2187,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> >  	u64 req_end = req->va.addr + req->va.range;
> >  	int ret;
> >  
> > -	if (unlikely(!drm_gpuvm_range_valid(gpuvm, req->va.addr, req->va.range)))
> > -		return -EINVAL;
> > +	ret = check_map_req(gpuvm, req);
> > +	if (unlikely(ret))
> > +		return ret;
> >  
> >  	drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req->va.addr, req_end) {
> > @@ -2175,7 +2219,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> >  					.va.addr = req_end,
> >  					.va.range = range - req->va.range,
> >  					.gem.obj = obj,
> > -					.gem.offset = offset + req->va.range,
> > +					.gem.range = va->gem.range,
> > +					.gem.offset = offset,  
> 
> Why change this from offset + req->va.range to just offset?

The offset is now only incremented further down, and only when
DRM_GPUVA_REPEAT is not set, because we don't want to advance the GEM
offset when the mapped portion is repeated.
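
To illustrate why, here's a hypothetical helper (not part of this
series) that resolves the GEM offset backing a given address, assuming
repetitions are anchored at the mapping's base address:

	static u64 va_to_gem_offset(const struct drm_gpuva *va, u64 addr)
	{
		u64 va_offset = addr - va->va.addr;

		if (va->flags & DRM_GPUVA_REPEAT) {
			/* Wrap within the repeated GEM window. do_div()
			 * returns the remainder; gem.range is assumed to
			 * fit in 32 bits here, as it does for page-sized
			 * windows.
			 */
			return va->gem.offset + do_div(va_offset, va->gem.range);
		}

		return va->gem.offset + va_offset;
	}

Whichever chunk of a repeated mapping survives a split still replays
the same [gem.offset, gem.offset + gem.range) window, so remap must
leave gem.offset untouched instead of advancing it like it does for
regular mappings.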

> 
> Same for the other similar changes below.
> 
> Also it seems that we need to update the documentation which shows all potential
> cases when calling __drm_gpuvm_sm_map() [1].

Yep, will do.
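
In the style of the existing diagrams, the new REPEAT case for a map
request hitting the start of an existing repeated mapping could look
something like this (sketch):

	     0           a           2
	old: |-----------------------| (bo_offset=n, DRM_GPUVA_REPEAT)

	     0     b     1
	req: |-----------| (bo_offset=m)

	     0     b     1     a'    2
	new: |-----------|-----------| (b.bo_offset=m, a'.bo_offset=n)

a' keeps bo_offset=n where a regular mapping would get n+1, because the
remaining piece keeps repeating the same GEM window.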

> 
> [1] https://docs.kernel.org/gpu/drm-mm.html#split-and-merge
> 
> >  					.flags = va->flags,
> >  				};
> >  				struct drm_gpuva_op_unmap u = {
> > @@ -2183,6 +2228,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> >  					.keep = merge,
> >  				};
> >  
> > +				if (!(va->flags & DRM_GPUVA_REPEAT))
> > +					n.gem.offset += req->va.range;
> > +
> >  				ret = op_remap_cb(ops, priv, NULL, &n, &u);
> >  				if (ret)
> >  					return ret;
> > @@ -2194,6 +2242,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> >  				.va.addr = addr,
> >  				.va.range = ls_range,
> >  				.gem.obj = obj,
> > +				.gem.range = va->gem.range,
> >  				.gem.offset = offset,
> >  				.flags = va->flags,
> >  			};
> > @@ -2220,11 +2269,14 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> >  					.va.addr = req_end,
> >  					.va.range = end - req_end,
> >  					.gem.obj = obj,
> > -					.gem.offset = offset + ls_range +
> > -						      req->va.range,
> > +					.gem.range = va->gem.range,
> > +					.gem.offset = offset,
> >  					.flags = va->flags,
> >  				};
> >  
> > +				if (!(va->flags & DRM_GPUVA_REPEAT))
> > +					n.gem.offset += ls_range + req->va.range;
> > +
> >  				ret = op_remap_cb(ops, priv, &p, &n, &u);
> >  				if (ret)
> >  					return ret;
> > @@ -2250,7 +2302,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> >  					.va.addr = req_end,
> >  					.va.range = end - req_end,
> >  					.gem.obj = obj,
> > -					.gem.offset = offset + req_end - addr,
> > +					.gem.range = va->gem.range,
> > +					.gem.offset = offset,
> >  					.flags = va->flags,
> >  				};
> >  				struct drm_gpuva_op_unmap u = {
> > @@ -2258,6 +2311,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
> >  					.keep = merge,
> >  				};
> >  
> > +				if (!(va->flags & DRM_GPUVA_REPEAT))
> > +					n.gem.offset += req_end - addr;
> > +
> >  				ret = op_remap_cb(ops, priv, NULL, &n, &u);
> >  				if (ret)
> >  					return ret;
> > @@ -2294,6 +2350,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
> >  			prev.va.addr = addr;
> >  			prev.va.range = req_addr - addr;
> >  			prev.gem.obj = obj;
> > +			prev.gem.range = va->gem.range;
> >  			prev.gem.offset = offset;
> >  			prev.flags = va->flags;
> >  
> > @@ -2304,7 +2361,10 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
> >  			next.va.addr = req_end;
> >  			next.va.range = end - req_end;
> >  			next.gem.obj = obj;
> > -			next.gem.offset = offset + (req_end - addr);
> > +			next.gem.range = va->gem.range;
> > +			next.gem.offset = offset;
> > +			if (!(va->flags & DRM_GPUVA_REPEAT))
> > +				next.gem.offset += req_end - addr;
> >  			next.flags = va->flags;
> >  
> >  			next_split = true;
> > diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
> > index f77a89e791f1..629e8508f99f 100644
> > --- a/include/drm/drm_gpuvm.h
> > +++ b/include/drm/drm_gpuvm.h
> > @@ -56,10 +56,19 @@ enum drm_gpuva_flags {
> >  	 */
> >  	DRM_GPUVA_SPARSE = (1 << 1),
> >  
> > +	/**
> > +	 * @DRM_GPUVA_REPEAT:
> > +	 *
> > +	 * Flag indicating that the &drm_gpuva is a mapping of a GEM
> > +	 * portion repeated multiple times to fill the virtual address  
> 
> "of a GEM object with a certain range that is repeated multiple times to ..."


