[Intel-gfx] [PATCH 4/6] drm/i915: Move i915_vma_move_to_active() to i915_vma.c
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Mon Jul 2 11:41:56 UTC 2018
On 29/06/2018 23:54, Chris Wilson wrote:
> i915_vma_move_to_active() has grown beyond its execbuf origins, and
> should take its rightful place in i915_vma.c as a method for i915_vma!
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_drv.h            |  3 --
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 61 ----------------------
>  drivers/gpu/drm/i915/i915_vma.c            | 61 ++++++++++++++++++++++
>  drivers/gpu/drm/i915/i915_vma.h            |  4 ++
>  4 files changed, 65 insertions(+), 64 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index cd8f69a00e86..1c04872890d4 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3099,9 +3099,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
>  }
>  
>  int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
> -int __must_check i915_vma_move_to_active(struct i915_vma *vma,
> -                                         struct i915_request *rq,
> -                                         unsigned int flags);
>  int i915_gem_dumb_create(struct drm_file *file_priv,
>                           struct drm_device *dev,
>                           struct drm_mode_create_dumb *args);
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 97136e4ce91d..3f0c612d42e7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1868,67 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
>          return true;
>  }
>  
> -static void export_fence(struct i915_vma *vma,
> -                         struct i915_request *rq,
> -                         unsigned int flags)
> -{
> -        struct reservation_object *resv = vma->resv;
> -
> -        /*
> -         * Ignore errors from failing to allocate the new fence, we can't
> -         * handle an error right now. Worst case should be missed
> -         * synchronisation leading to rendering corruption.
> -         */
> -        reservation_object_lock(resv, NULL);
> -        if (flags & EXEC_OBJECT_WRITE)
> -                reservation_object_add_excl_fence(resv, &rq->fence);
> -        else if (reservation_object_reserve_shared(resv) == 0)
> -                reservation_object_add_shared_fence(resv, &rq->fence);
> -        reservation_object_unlock(resv);
> -}
> -
> -int i915_vma_move_to_active(struct i915_vma *vma,
> -                            struct i915_request *rq,
> -                            unsigned int flags)
> -{
> -        struct drm_i915_gem_object *obj = vma->obj;
> -        const unsigned int idx = rq->engine->id;
> -
> -        lockdep_assert_held(&rq->i915->drm.struct_mutex);
> -        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
> -
> -        /*
> -         * Add a reference if we're newly entering the active list.
> -         * The order in which we add operations to the retirement queue is
> -         * vital here: mark_active adds to the start of the callback list,
> -         * such that subsequent callbacks are called first. Therefore we
> -         * add the active reference first and queue for it to be dropped
> -         * *last*.
> -         */
> -        if (!i915_vma_is_active(vma))
> -                obj->active_count++;
> -        i915_vma_set_active(vma, idx);
> -        i915_gem_active_set(&vma->last_read[idx], rq);
> -        list_move_tail(&vma->vm_link, &vma->vm->active_list);
> -
> -        obj->write_domain = 0;
> -        if (flags & EXEC_OBJECT_WRITE) {
> -                obj->write_domain = I915_GEM_DOMAIN_RENDER;
> -
> -                if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
> -                        i915_gem_active_set(&obj->frontbuffer_write, rq);
> -
> -                obj->read_domains = 0;
> -        }
> -        obj->read_domains |= I915_GEM_GPU_DOMAINS;
> -
> -        if (flags & EXEC_OBJECT_NEEDS_FENCE)
> -                i915_gem_active_set(&vma->last_fence, rq);
> -
> -        export_fence(vma, rq, flags);
> -        return 0;
> -}
> -
>  static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
>  {
>          u32 *cs;
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index d0e606e9b27a..7635c27e7e8b 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -859,6 +859,67 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
>                  list_del(&vma->obj->userfault_link);
>  }
>  
> +static void export_fence(struct i915_vma *vma,
> +                         struct i915_request *rq,
> +                         unsigned int flags)
> +{
> +        struct reservation_object *resv = vma->resv;
> +
> +        /*
> +         * Ignore errors from failing to allocate the new fence, we can't
> +         * handle an error right now. Worst case should be missed
> +         * synchronisation leading to rendering corruption.
> +         */
> +        reservation_object_lock(resv, NULL);
> +        if (flags & EXEC_OBJECT_WRITE)
> +                reservation_object_add_excl_fence(resv, &rq->fence);
> +        else if (reservation_object_reserve_shared(resv) == 0)
> +                reservation_object_add_shared_fence(resv, &rq->fence);
> +        reservation_object_unlock(resv);
> +}
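
A note for readers following the reservation object dance above: the
exclusive slot orders the request against every other user of the
object, while a shared slot only orders it against a future writer.
The consumer side would look roughly like this sketch (the `write'
flag and the error handling are illustrative, not part of this patch;
only the reservation_object call itself is the real contemporary API):

        long ret;

        /*
         * A writer must wait for all fences; a reader only needs the
         * exclusive (write) fence to retire.
         */
        ret = reservation_object_wait_timeout_rcu(resv,
                                                  write /* wait_all */,
                                                  true /* interruptible */,
                                                  MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;
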
> +
> +int i915_vma_move_to_active(struct i915_vma *vma,
> +                            struct i915_request *rq,
> +                            unsigned int flags)
> +{
> +        struct drm_i915_gem_object *obj = vma->obj;
> +        const unsigned int idx = rq->engine->id;
> +
> +        lockdep_assert_held(&rq->i915->drm.struct_mutex);
> +        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
> +
> +        /*
> +         * Add a reference if we're newly entering the active list.
> +         * The order in which we add operations to the retirement queue is
> +         * vital here: mark_active adds to the start of the callback list,
> +         * such that subsequent callbacks are called first. Therefore we
> +         * add the active reference first and queue for it to be dropped
> +         * *last*.
> +         */
> +        if (!i915_vma_is_active(vma))
> +                obj->active_count++;
> +        i915_vma_set_active(vma, idx);
> +        i915_gem_active_set(&vma->last_read[idx], rq);
> +        list_move_tail(&vma->vm_link, &vma->vm->active_list);
> +
> +        obj->write_domain = 0;
> +        if (flags & EXEC_OBJECT_WRITE) {
> +                obj->write_domain = I915_GEM_DOMAIN_RENDER;
> +
> +                if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
> +                        i915_gem_active_set(&obj->frontbuffer_write, rq);
> +
> +                obj->read_domains = 0;
> +        }
> +        obj->read_domains |= I915_GEM_GPU_DOMAINS;
> +
> +        if (flags & EXEC_OBJECT_NEEDS_FENCE)
> +                i915_gem_active_set(&vma->last_fence, rq);
> +
> +        export_fence(vma, rq, flags);
> +        return 0;
> +}
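
To illustrate the contract now exported from i915_vma.h: callers hold
struct_mutex, the vma is pinned, and the return value may not be
dropped on the floor. A hypothetical helper (move_all_to_active() and
its parameters are invented for this sketch, they are not in the
patch):

        static int move_all_to_active(struct i915_vma **vma, int count,
                                      struct i915_request *rq,
                                      unsigned int flags)
        {
                int i, err;

                lockdep_assert_held(&rq->i915->drm.struct_mutex);

                for (i = 0; i < count; i++) {
                        /* __must_check: any failure has to be propagated */
                        err = i915_vma_move_to_active(vma[i], rq, flags);
                        if (err)
                                return err;
                }

                return 0;
        }

As it stands the function can only return 0, but keeping the
__must_check annotation means callers like the above stay correct if
an error path is ever added.
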
> +
>  int i915_vma_unbind(struct i915_vma *vma)
>  {
>          unsigned long active;
> diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
> index 66a228931517..a218b689e418 100644
> --- a/drivers/gpu/drm/i915/i915_vma.h
> +++ b/drivers/gpu/drm/i915/i915_vma.h
> @@ -215,6 +215,10 @@ static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
>          return vma->active & BIT(engine);
>  }
>  
> +int __must_check i915_vma_move_to_active(struct i915_vma *vma,
> +                                         struct i915_request *rq,
> +                                         unsigned int flags);
> +
>  static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
>  {
>          GEM_BUG_ON(!i915_vma_is_ggtt(vma));
>
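
One last aside on the context visible in this hunk: the
i915_vma_has_active_engine() test above is the reader of the
per-engine bitmask that i915_vma_set_active() writes. A toy userspace
model of that bookkeeping, with invented engine ids (RCS/BCS here are
just labels for the sketch, not the real enum values):

        #include <assert.h>

        int main(void)
        {
                unsigned long active = 0;            /* models vma->active */
                const unsigned int RCS = 0, BCS = 1; /* invented ids */

                /* i915_vma_set_active(vma, idx) sets the engine's bit */
                active |= 1UL << RCS;
                active |= 1UL << BCS;

                /* i915_vma_has_active_engine(vma, engine) tests it */
                assert(active & (1UL << RCS));

                /* retiring the last request on an engine clears it */
                active &= ~(1UL << RCS);
                assert(!(active & (1UL << RCS)));

                return 0;
        }
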
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko