[PATCH 8/9] drm/vmwgfx: Implement an infrastructure for read-coherent resources

Deepak Singh Rawat drawat at vmware.com
Mon Apr 22 20:12:12 UTC 2019


Minor nits below, otherwise

Reviewed-by: Deepak Rawat <drawat at vmware.com>

On Fri, 2019-04-12 at 09:04 -0700, Thomas Hellstrom wrote:
> Similar to write-coherent resources, make sure that from the
> user-space point of view, GPU-rendered content is automatically
> available for reading by the CPU.
> 
> Signed-off-by: Thomas Hellstrom <thellstrom at vmware.com>
> ---
>  drivers/gpu/drm/ttm/ttm_bo_vm.c               |   1 +
>  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h           |   8 +-
>  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c    |  69 +++++++++++-
>  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c      | 102 +++++++++++++++++-
>  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h |   2 +
>  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c    |   3 +-
>  6 files changed, 176 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> index 3bd28fb97124..0065b138f450 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> @@ -42,6 +42,7 @@
>  #include <linux/uaccess.h>
>  #include <linux/mem_encrypt.h>
>  
> +
>  static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
>  				struct vm_fault *vmf)
>  {
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> index 81ebcd668038..00794415335e 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> @@ -96,6 +96,7 @@ struct vmw_fpriv {
>   * @map: Kmap object for semi-persistent mappings
>   * @res_prios: Eviction priority counts for attached resources
>   * @dirty: structure for user-space dirty-tracking
> + * @cleaning: Current validation sequence is cleaning.
>   */
>  struct vmw_buffer_object {
>  	struct ttm_buffer_object base;
> @@ -690,7 +691,8 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
>  extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
>  extern struct vmw_resource *
>  vmw_resource_reference_unless_doomed(struct vmw_resource *res);
> -extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
> +extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
> +				 bool dirtying);
>  extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
>  				bool no_backup);
>  extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
> @@ -734,6 +736,8 @@ void vmw_resource_mob_attach(struct vmw_resource *res);
>  void vmw_resource_mob_detach(struct vmw_resource *res);
>  void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
>  			       pgoff_t end);
> +int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
> +			pgoff_t end, pgoff_t *num_prefault);
>  
>  /**
>   * vmw_resource_mob_attached - Whether a resource currently has a mob attached
> @@ -1428,6 +1432,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
>  void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
>  void vmw_bo_dirty_clear_res(struct vmw_resource *res);
>  void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
> +void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
> +			pgoff_t start, pgoff_t end);
>  vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
>  vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
>  
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
> index 87e4a73b1175..773ff30a4b60 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
> @@ -153,7 +153,6 @@ static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
>  	}
>  }
>  
> -
>  /**
>   * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
>   * tracking structure
> @@ -171,6 +170,51 @@ void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
>  		vmw_bo_dirty_scan_mkwrite(vbo);
>  }
>  
> +/**
> + * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
> + * an unmap_mapping_range operation.
> + * @vbo: The buffer object,
> + * @start: First page of the range within the buffer object.
> + * @end: Last page of the range within the buffer object + 1.
> + *
> + * If we're using the _PAGETABLE scan method, we may leak dirty pages
> + * when calling unmap_mapping_range(). This function makes sure we pick
> + * up all dirty pages.
> + */
> +static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
> +				   pgoff_t start, pgoff_t end)
> +{
> +	struct vmw_bo_dirty *dirty = vbo->dirty;
> +	unsigned long offset = drm_vma_node_start(&vbo->base.vma_node);
> +	struct address_space *mapping = vbo->base.bdev->dev_mapping;
> +
> +	if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
> +		return;
> +
> +	apply_as_wrprotect(mapping, start + offset, end - start);
> +	apply_as_clean(mapping, start + offset, end - start, offset,
> +		       &dirty->bitmap[0], &dirty->start, &dirty->end);
> +}
> +
> +/**
> + * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
> + * @vbo: The buffer object,
> + * @start: First page of the range within the buffer object.
> + * @end: Last page of the range within the buffer object + 1.
> + *
> + * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange.
> + */
> +void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
> +			pgoff_t start, pgoff_t end)
> +{
> +	unsigned long offset = drm_vma_node_start(&vbo->base.vma_node);
> +	struct address_space *mapping = vbo->base.bdev->dev_mapping;
> +
> +	vmw_bo_dirty_pre_unmap(vbo, start, end);
> +	unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
> +				   (loff_t) (end - start) << PAGE_SHIFT);
> +}
> +
>  /**
>   * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
>   * @vbo: The buffer object
> @@ -392,6 +436,26 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
>  	if (ret)
>  		return ret;
>  
> +	num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
> +		TTM_BO_VM_NUM_PREFAULT;
> +
> +	if (vbo->dirty) {
> +		pgoff_t allowed_prefault;
> +		unsigned long page_offset;
> +
> +		page_offset = vmf->pgoff - drm_vma_node_start(&bo->vma_node);
> +		if (page_offset >= bo->num_pages ||
> +		    vmw_resources_clean(vbo, page_offset,
> +					page_offset + PAGE_SIZE,
> +					&allowed_prefault)) {
> +			ret = VM_FAULT_SIGBUS;
> +			goto out_unlock;
> +		}
> +
> +		num_prefault = min(num_prefault, allowed_prefault);
> +	}
> +
> +

Extra blank line here.

>  	/*
>  	 * This will cause mkwrite() to be called for each pte on
>  	 * write-enable vmas.
> @@ -399,12 +463,11 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
>  	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
>  		cvma.vm_flags &= ~VM_WRITE;
>  
> -	num_prefault = (vma->vm_flags & VM_RAND_READ) ? 0 :
> -		TTM_BO_VM_NUM_PREFAULT;
>  	ret = ttm_bo_vm_fault_reserved(vmf, &cvma, num_prefault);
>  	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
>  		return ret;
>  
> +out_unlock:
>  	reservation_object_unlock(bo->resv);
>  	return ret;
>  }
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index ff9fe5650468..30367cb06143 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -395,7 +395,8 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
>   * should be retried once resources have been freed up.
>   */
>  static int vmw_resource_do_validate(struct vmw_resource *res,
> -				    struct ttm_validate_buffer *val_buf)
> +				    struct ttm_validate_buffer *val_buf,
> +				    bool dirtying)
>  {
>  	int ret = 0;
>  	const struct vmw_res_func *func = res->func;
> @@ -437,6 +438,15 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
>  	 * the resource.
>  	 */
>  	if (res->dirty) {
> +		if (dirtying && !res->res_dirty) {
> +			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
> +			pgoff_t end = __KERNEL_DIV_ROUND_UP
> +				(res->backup_offset + res->backup_size,
> +				 PAGE_SIZE);
> +
> +			vmw_bo_dirty_unmap(res->backup, start, end);
> +		}
> +
>  		vmw_bo_dirty_transfer_to_res(res);
>  		return func->dirty_sync(res);
>  	}
> @@ -680,6 +690,7 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
>   *                         to the device.
>   * @res: The resource to make visible to the device.
>   * @intr: Perform waits interruptible if possible.
> + * @dirtying: Pending GPU operation will dirty the resource
>   *
>   * On successful return, any backup DMA buffer pointed to by @res->backup will
>   * be reserved and validated.
> @@ -689,7 +700,8 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
>   * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
>   * on failure.
>   */
> -int vmw_resource_validate(struct vmw_resource *res, bool intr)
> +int vmw_resource_validate(struct vmw_resource *res, bool intr,
> +			  bool dirtying)
>  {
>  	int ret;
>  	struct vmw_resource *evict_res;
> @@ -706,7 +718,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
>  	if (res->backup)
>  		val_buf.bo = &res->backup->base;
>  	do {
> -		ret = vmw_resource_do_validate(res, &val_buf);
> +		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
>  		if (likely(ret != -EBUSY))
>  			break;
>  
> @@ -1006,7 +1018,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
>  			/* Do we really need to pin the MOB as well? */
>  			vmw_bo_pin_reserved(vbo, true);
>  		}
> -		ret = vmw_resource_validate(res, interruptible);
> +		ret = vmw_resource_validate(res, interruptible, true);
>  		if (vbo)
>  			ttm_bo_unreserve(&vbo->base);
>  		if (ret)
> @@ -1081,3 +1093,85 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
>  		res->func->dirty_range_add(res, start << PAGE_SHIFT,
>  					   end << PAGE_SHIFT);
>  }
> +
> +/**
> + * vmw_resources_clean - Clean resources intersecting a mob range
> + * @res_tree: Tree of resources attached to the mob

This doesn't match the function signature; the first parameter is @vbo,
not @res_tree.
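
Perhaps something like this instead (just a suggested wording):

 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object holding the resource tree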

> + * @start: The mob page offset starting the range
> + * @end: The mob page offset ending the range
> + * @num_prefault: Returns how many pages including the first have been
> + * cleaned and are ok to prefault
> + */
> +int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
> +			pgoff_t end, pgoff_t *num_prefault)
> +{
> +	struct rb_node *cur = vbo->res_tree.rb_node;
> +	struct vmw_resource *found = NULL;
> +	unsigned long res_start = start << PAGE_SHIFT;
> +	unsigned long res_end = end << PAGE_SHIFT;
> +	unsigned long last_cleaned = 0;
> +
> +	/*
> +	 * Find the resource with lowest backup_offset that intersects the
> +	 * range.
> +	 */
> +	while (cur) {
> +		struct vmw_resource *cur_res =
> +			container_of(cur, struct vmw_resource, mob_node);
> +
> +		if (cur_res->backup_offset >= res_end) {
> +			cur = cur->rb_left;
> +		} else if (cur_res->backup_offset + cur_res->backup_size <=
> +			   res_start) {
> +			cur = cur->rb_right;
> +		} else {
> +			found = cur_res;

I haven't looked into how the RB tree works, but do you need to break
out of the loop once a resource is found?
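
Thinking about it some more, I guess not: on an overlap the loop
remembers the node and keeps descending left, so it converges on the
resource with the lowest backup_offset that still intersects the range.
Breaking on the first hit could return a higher resource, and the later
rb_next() walk would then skip dirty resources at the low end of the
range. A stand-alone sketch of the pattern (made-up types, not the
driver structs):

	/* Needs <linux/rbtree.h>, and <linux/kernel.h> for container_of(). */
	struct range_node {
		struct rb_node rb;
		unsigned long start;	/* first byte of the range */
		unsigned long size;	/* length in bytes */
	};

	/* Return the leftmost node overlapping [start, end), or NULL. */
	static struct range_node *
	first_overlap(struct rb_node *cur, unsigned long start,
		      unsigned long end)
	{
		struct range_node *found = NULL;

		while (cur) {
			struct range_node *n =
				container_of(cur, struct range_node, rb);

			if (n->start >= end)
				cur = cur->rb_left;	/* entirely right of range */
			else if (n->start + n->size <= start)
				cur = cur->rb_right;	/* entirely left of range */
			else {
				found = n;		/* overlaps; remember it... */
				cur = cur->rb_left;	/* ...but try for a lower one */
			}
		}
		return found;
	}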

> +			cur = cur->rb_left;
> +		}
> +	}
> +
> +	/*
> +	 * In order of increasing backup_offset, clean dirty resources
> +	 * intersecting the range.
> +	 */
> +	while (found) {
> +		if (found->res_dirty) {
> +			int ret;
> +
> +			if (!found->func->clean)
> +				return -EINVAL;
> +
> +			ret = found->func->clean(found);
> +			if (ret)
> +				return ret;
> +
> +			found->res_dirty = false;
> +		}
> +		last_cleaned = found->backup_offset + found->backup_size;
> +		cur = rb_next(&found->mob_node);
> +		if (!cur)
> +			break;
> +
> +		found = container_of(cur, struct vmw_resource, mob_node);
> +		if (found->backup_offset >= res_end)
> +			break;
> +	}
> +
> +	/*
> +	 * Set number of pages allowed prefaulting and fence the buffer object
> +	 */
> +	*num_prefault = 1;
> +	if (last_cleaned > res_start) {
> +		struct ttm_buffer_object *bo = &vbo->base;
> +
> +		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
> +						      PAGE_SIZE);
> +		vmw_bo_fence_single(bo, NULL);
> +		if (bo->moving)
> +			dma_fence_put(bo->moving);
> +		bo->moving = dma_fence_get
> +			(reservation_object_get_excl(bo->resv));
> +	}
> +
> +	return 0;
> +}
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
> index c85144286cfe..3b7438b2d289 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
> @@ -77,6 +77,7 @@ struct vmw_user_resource_conv {
>   * @dirty_sync:        Upload the dirty mob contents to the resource.
>   * @dirty_add_range:   Add a sequential dirty range to the resource
>   *                     dirty tracker.
> + * @clean:             Clean the resource.
>   */
>  struct vmw_res_func {
>  	enum vmw_res_type res_type;
> @@ -101,6 +102,7 @@ struct vmw_res_func {
>  	int (*dirty_sync)(struct vmw_resource *res);
>  	void (*dirty_range_add)(struct vmw_resource *res, size_t start,
>  				 size_t end);
> +	int (*clean)(struct vmw_resource *res);
>  };
>  
>  /**
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
> index 5b0c928bb5ba..81d9d7adc055 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
> @@ -644,7 +644,8 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
>  		struct vmw_resource *res = val->res;
>  		struct vmw_buffer_object *backup = res->backup;
>  
> -		ret = vmw_resource_validate(res, intr);
> +		ret = vmw_resource_validate(res, intr, val->dirty_set &&
> +					    val->dirty);
>  		if (ret) {
>  			if (ret != -ERESTARTSYS)
>  				DRM_ERROR("Failed to validate resource.\n");
> -- 
> 2.20.1
> 


