[PATCH drm-next 03/14] drm: manager to keep track of GPUs VA mappings

Danilo Krummrich dakr at redhat.com
Sat Jan 28 01:51:14 UTC 2023


On Fri, Jan 27, 2023 at 12:24:07AM +0000, Matthew Brost wrote:
> On Wed, Jan 18, 2023 at 07:12:45AM +0100, Danilo Krummrich wrote:
> > This adds the infrastructure for a manager implementation to keep track
> > of GPU virtual address (VA) mappings.
> > 
> > New UAPIs, motivated by the Vulkan sparse memory bindings that
> > graphics drivers are starting to implement, allow userspace
> > applications to request multiple and arbitrary GPU VA mappings of
> > buffer objects. The DRM GPU VA manager is intended to serve the
> > following purposes in this context.
> > 
> > 1) Provide a dedicated range allocator to track GPU VA allocations and
> >    mappings, making use of the drm_mm range allocator.
> > 
> > 2) Generically connect GPU VA mappings to their backing buffers, in
> >    particular DRM GEM objects.
> > 
> > 3) Provide a common implementation to perform more complex mapping
> >    operations on the GPU VA space. In particular splitting and merging
> >    of GPU VA mappings, e.g. for intersecting mapping requests or partial
> >    unmap requests.
> > 
> > Idea-suggested-by: Dave Airlie <airlied at redhat.com>
> > Signed-off-by: Danilo Krummrich <dakr at redhat.com>
> 
> <snip>
> 
> > +++ b/drivers/gpu/drm/drm_gpuva_mgr.c
> 
> <snip>
> 
> > +struct drm_gpuva *
> > +drm_gpuva_find(struct drm_gpuva_manager *mgr,
> > +	       u64 addr, u64 range)
> > +{
> > +	struct drm_gpuva *va;
> > +
> > +	drm_gpuva_for_each_va_in_range(va, mgr, addr, range) {
> 
> Last argument should be: range + addr, right?
> 

Thanks, good catch.
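
For reference, the lookup in drm_gpuva_find() should then read:

	drm_gpuva_for_each_va_in_range(va, mgr, addr, addr + range) {

so that the iterator gets the end address of the interval rather than
its size.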

> > +		if (va->node.start == addr &&
> > +		    va->node.size == range)
> > +			return va;
> > +	}
> > +
> > +	return NULL;
> > +}
> > +EXPORT_SYMBOL(drm_gpuva_find);
> > +
> > +/**
> > + * drm_gpuva_find_prev - find the &drm_gpuva before the given address
> > + * @mgr: the &drm_gpuva_manager to search in
> > + * @start: the given GPU VA's start address
> > + *
> > + * Find the adjacent &drm_gpuva before the GPU VA with the given &start
> > + * address.
> > + *
> > + * Note that if there is any free space between the GPU VA mappings, no
> > + * mapping is returned.
> > + *
> > + * Returns: a pointer to the found &drm_gpuva or NULL if none was found
> > + */
> > +struct drm_gpuva *
> > +drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start)
> > +{
> > +	struct drm_mm_node *node;
> > +
> > +	if (start <= mgr->mm_start ||
> > +	    start > (mgr->mm_start + mgr->mm_range))
> > +		return NULL;
> > +
> > +	node = __drm_mm_interval_first(&mgr->va_mm, start - 1, start);
> > +	if (node == &mgr->va_mm.head_node)
> > +		return NULL;
> > +
> > +	return (struct drm_gpuva *)node;
> > +}
> > +EXPORT_SYMBOL(drm_gpuva_find_prev);
> > +
> > +/**
> > + * drm_gpuva_find_next - find the &drm_gpuva after the given address
> > + * @mgr: the &drm_gpuva_manager to search in
> > + * @end: the given GPU VA's end address
> > + *
> > + * Find the adjacent &drm_gpuva after the GPU VA with the given &end
> > + * address.
> > + *
> > + * Note that if there is any free space between the GPU VA mappings, no
> > + * mapping is returned.
> > + *
> > + * Returns: a pointer to the found &drm_gpuva or NULL if none was found
> > + */
> > +struct drm_gpuva *
> > +drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end)
> > +{
> > +	struct drm_mm_node *node;
> > +
> > +	if (end < mgr->mm_start ||
> > +	    end >= (mgr->mm_start + mgr->mm_range))
> > +		return NULL;
> > +
> > +	node = __drm_mm_interval_first(&mgr->va_mm, end, end + 1);
> > +	if (node == &mgr->va_mm.head_node)
> > +		return NULL;
> > +
> > +	return (struct drm_gpuva *)node;
> > +}
> > +EXPORT_SYMBOL(drm_gpuva_find_next);
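
As an aside: these two helpers allow a driver to check, for instance,
whether a given range has directly adjacent neighbours, e.g. when
deciding whether mappings could be merged. A hypothetical driver-side
sketch, not part of this series:

	/* Hypothetical: check whether a new mapping at [addr, addr + range)
	 * would sit directly between two existing mappings. Both helpers
	 * return NULL if there is a gap (or no neighbour) on that side.
	 */
	static bool mapping_has_adjacent_neighbours(struct drm_gpuva_manager *mgr,
						    u64 addr, u64 range)
	{
		struct drm_gpuva *prev = drm_gpuva_find_prev(mgr, addr);
		struct drm_gpuva *next = drm_gpuva_find_next(mgr, addr + range);

		return prev && next;
	}
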
> > +
> > +/**
> > + * drm_gpuva_region_insert - insert a &drm_gpuva_region
> > + * @mgr: the &drm_gpuva_manager to insert the &drm_gpuva_region in
> > + * @reg: the &drm_gpuva_region to insert
> > + * @addr: the start address of the GPU VA
> > + * @range: the range of the GPU VA
> > + *
> > + * Insert a &drm_gpuva_region with a given address and range into a
> > + * &drm_gpuva_manager.
> > + *
> > + * Returns: 0 on success, negative error code on failure.
> > + */
> > +int
> > +drm_gpuva_region_insert(struct drm_gpuva_manager *mgr,
> > +			struct drm_gpuva_region *reg,
> > +			u64 addr, u64 range)
> > +{
> > +	int ret;
> > +
> > +	ret = drm_mm_insert_node_in_range(&mgr->region_mm, &reg->node,
> > +					  range, 0,
> > +					  0, addr,
> > +					  addr + range,
> > +					  DRM_MM_INSERT_LOW|
> > +					  DRM_MM_INSERT_ONCE);
> > +	if (ret)
> > +		return ret;
> > +
> > +	reg->mgr = mgr;
> > +
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL(drm_gpuva_region_insert);
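
Usage-wise, a driver allocates a &drm_gpuva_region and passes it here
together with the requested placement. A hypothetical sketch (error
handling kept minimal, the address and size are made up):

	/* Hypothetical: reserve a 1GiB VA region at a fixed GPU address. */
	struct drm_gpuva_region *reg;
	int ret;

	reg = kzalloc(sizeof(*reg), GFP_KERNEL);
	if (!reg)
		return -ENOMEM;

	ret = drm_gpuva_region_insert(mgr, reg, 0x100000000ULL, SZ_1G);
	if (ret) {
		kfree(reg);
		return ret;
	}
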
> > +
> > +/**
> > + * drm_gpuva_region_destroy - destroy a &drm_gpuva_region
> > + * @mgr: the &drm_gpuva_manager holding the region
> > + * @reg: the &drm_gpuva_region to destroy
> > + *
> > + * This removes the given &reg from the underlying range allocator.
> > + */
> > +void
> > +drm_gpuva_region_destroy(struct drm_gpuva_manager *mgr,
> > +			 struct drm_gpuva_region *reg)
> > +{
> > +	struct drm_gpuva *va;
> > +
> > +	drm_gpuva_for_each_va_in_range(va, mgr,
> > +				       reg->node.start,
> > +				       reg->node.size) {
> 
> Last argument should be: reg->node.start + reg->node.size, right?

Thanks, pushed the fix to new-uapi-drm-next-fixes.
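
The iteration in drm_gpuva_region_destroy() now reads:

	drm_gpuva_for_each_va_in_range(va, mgr,
				       reg->node.start,
				       reg->node.start + reg->node.size) {

i.e. it passes the region's end address instead of its size, same as
the drm_gpuva_find() fix above.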

> 
> Matt
> 
> > +		WARN(1, "GPU VA region must be empty on destroy.\n");
> > +		return;
> > +	}
> > +
> > +	if (&reg->node == &mgr->kernel_alloc_node) {
> > +		WARN(1, "Can't destroy kernel reserved region.\n");
> > +		return;
> > +	}
> > +
> > +	drm_mm_remove_node(&reg->node);
> > +}
> > +EXPORT_SYMBOL(drm_gpuva_region_destroy);
> 


