[Freedreno] [PATCH v2 3/5] drm: msm: Switch to use drm_gem_object reservation_object
Rob Clark
robdclark at gmail.com
Wed Feb 13 14:31:48 UTC 2019
On Sat, Feb 2, 2019 at 10:42 AM Rob Herring <robh at kernel.org> wrote:
>
> Now that the base struct drm_gem_object has a reservation_object, use it
> and remove the private BO one.
>
> We can't use the drm_gem_reservation_object_wait() helper for MSM
> because (in theory) msm_gem_cpu_prep() will also do some cache
> maintenance on the GEM object.
>
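For anyone porting other drivers: if there is no CPU-side cache
maintenance to do, the new core helper can replace the whole open-coded
wait. A rough sketch of the difference (helper name per the earlier
patch in this series; the msm side is paraphrased from
msm_gem_cpu_prep() below):

	/* generic driver: resolve the handle and wait on obj->resv */
	ret = drm_gem_reservation_object_wait(file, args->handle,
					      write, timeout);

	/* msm: open-coded wait, because cache maintenance may have to
	 * follow once the fences have signalled:
	 */
	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret > 0) {
		/* (in theory) CPU cache sync/invalidate for 'op' here */
	}
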
> Cc: Rob Clark <robdclark at gmail.com>
> Cc: David Airlie <airlied at linux.ie>
> Cc: Daniel Vetter <daniel at ffwll.ch>
> Cc: linux-arm-msm at vger.kernel.org
> Cc: dri-devel at lists.freedesktop.org
> Cc: freedreno at lists.freedesktop.org
> Signed-off-by: Rob Herring <robh at kernel.org>
Acked-by: Rob Clark <robdclark at gmail.com>
> ---
>  drivers/gpu/drm/msm/msm_drv.c        |  1 -
>  drivers/gpu/drm/msm/msm_drv.h        |  1 -
>  drivers/gpu/drm/msm/msm_gem.c        | 27 +++++++++------------------
>  drivers/gpu/drm/msm/msm_gem_prime.c  |  7 -------
>  drivers/gpu/drm/msm/msm_gem_submit.c |  8 ++++----
>  5 files changed, 13 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index d2cdc7b553fe..522ddbd72250 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -1086,7 +1086,6 @@ static struct drm_driver msm_driver = {
>  	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
>  	.gem_prime_export = drm_gem_prime_export,
>  	.gem_prime_import = drm_gem_prime_import,
> -	.gem_prime_res_obj = msm_gem_prime_res_obj,
>  	.gem_prime_pin = msm_gem_prime_pin,
>  	.gem_prime_unpin = msm_gem_prime_unpin,
>  	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index 927e5d86f7c1..068f9172ad70 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -300,7 +300,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
>  void *msm_gem_prime_vmap(struct drm_gem_object *obj);
>  void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
>  int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
> -struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
>  struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
>  		struct dma_buf_attachment *attach, struct sg_table *sg);
>  int msm_gem_prime_pin(struct drm_gem_object *obj);
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index c8886d3071fa..d3973cc26522 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -672,14 +672,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
>  int msm_gem_sync_object(struct drm_gem_object *obj,
>  		struct msm_fence_context *fctx, bool exclusive)
>  {
> -	struct msm_gem_object *msm_obj = to_msm_bo(obj);
>  	struct reservation_object_list *fobj;
>  	struct dma_fence *fence;
>  	int i, ret;
>
> -	fobj = reservation_object_get_list(msm_obj->resv);
> +	fobj = reservation_object_get_list(obj->resv);
>  	if (!fobj || (fobj->shared_count == 0)) {
> -		fence = reservation_object_get_excl(msm_obj->resv);
> +		fence = reservation_object_get_excl(obj->resv);
>  		/* don't need to wait on our own fences, since ring is fifo */
>  		if (fence && (fence->context != fctx->context)) {
>  			ret = dma_fence_wait(fence, true);
> @@ -693,7 +692,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
>
>  	for (i = 0; i < fobj->shared_count; i++) {
>  		fence = rcu_dereference_protected(fobj->shared[i],
> -						reservation_object_held(msm_obj->resv));
> +						reservation_object_held(obj->resv));
>  		if (fence->context != fctx->context) {
>  			ret = dma_fence_wait(fence, true);
>  			if (ret)
> @@ -711,9 +710,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
>  	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
>  	msm_obj->gpu = gpu;
>  	if (exclusive)
> -		reservation_object_add_excl_fence(msm_obj->resv, fence);
> +		reservation_object_add_excl_fence(obj->resv, fence);
>  	else
> -		reservation_object_add_shared_fence(msm_obj->resv, fence);
> +		reservation_object_add_shared_fence(obj->resv, fence);
>  	list_del_init(&msm_obj->mm_list);
>  	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
>  }
> @@ -733,13 +732,12 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
>
>  int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
>  {
> -	struct msm_gem_object *msm_obj = to_msm_bo(obj);
>  	bool write = !!(op & MSM_PREP_WRITE);
>  	unsigned long remain =
>  		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
>  	long ret;
>
> -	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
> +	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
>  						  true, remain);
>  	if (ret == 0)
>  		return remain == 0 ? -EBUSY : -ETIMEDOUT;
> @@ -771,7 +769,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
>  void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
>  {
>  	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> -	struct reservation_object *robj = msm_obj->resv;
> +	struct reservation_object *robj = obj->resv;
>  	struct reservation_object_list *fobj;
>  	struct dma_fence *fence;
>  	struct msm_gem_vma *vma;
> @@ -883,9 +881,6 @@ void msm_gem_free_object(struct drm_gem_object *obj)
>  		put_pages(obj);
>  	}
>
> -	if (msm_obj->resv == &msm_obj->_resv)
> -		reservation_object_fini(msm_obj->resv);
> -
>  	drm_gem_object_release(obj);
>
>  	mutex_unlock(&msm_obj->lock);
> @@ -945,12 +940,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
>  	msm_obj->flags = flags;
>  	msm_obj->madv = MSM_MADV_WILLNEED;
>
> -	if (resv) {
> -		msm_obj->resv = resv;
> -	} else {
> -		msm_obj->resv = &msm_obj->_resv;
> -		reservation_object_init(msm_obj->resv);
> -	}
> +	if (resv)
> +		msm_obj->base.resv = resv;
>
>  	INIT_LIST_HEAD(&msm_obj->submit_entry);
>  	INIT_LIST_HEAD(&msm_obj->vmas);
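
(Side note: the dropped else branch is what the core now does for every
GEM object since the first patch in this series; roughly, in
drm_gem_private_object_init():

	obj->resv = &obj->_resv;
	reservation_object_init(&obj->_resv);

so passing a NULL resv here just means "use the embedded one".)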
> diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
> index 13403c6da6c7..60bb290700ce 100644
> --- a/drivers/gpu/drm/msm/msm_gem_prime.c
> +++ b/drivers/gpu/drm/msm/msm_gem_prime.c
> @@ -70,10 +70,3 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
>  	if (!obj->import_attach)
>  		msm_gem_put_pages(obj);
>  }
> -
> -struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
> -{
> -	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> -
> -	return msm_obj->resv;
> -}
> diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
> index 12b983fc0b56..df302521ec74 100644
> --- a/drivers/gpu/drm/msm/msm_gem_submit.c
> +++ b/drivers/gpu/drm/msm/msm_gem_submit.c
> @@ -173,7 +173,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
>  		msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
>
>  	if (submit->bos[i].flags & BO_LOCKED)
> -		ww_mutex_unlock(&msm_obj->resv->lock);
> +		ww_mutex_unlock(&msm_obj->base.resv->lock);
>
>  	if (backoff && !(submit->bos[i].flags & BO_VALID))
>  		submit->bos[i].iova = 0;
> @@ -196,7 +196,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
>  		contended = i;
>
>  		if (!(submit->bos[i].flags & BO_LOCKED)) {
> -			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
> +			ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
>  					&submit->ticket);
>  			if (ret)
>  				goto fail;
> @@ -218,7 +218,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
>  	if (ret == -EDEADLK) {
>  		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
>  		/* we lost out in a seqno race, lock and retry.. */
> -		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
> +		ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
>  				&submit->ticket);
>  		if (!ret) {
>  			submit->bos[contended].flags |= BO_LOCKED;
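
(For readers new to ww_mutex: the -EDEADLK handling above is the
standard acquire-context backoff dance. A minimal sketch of the
pattern, where obj/contended stand for the BO being locked and the BO
we lost on, and the hypothetical unlock_all_held() stands in for the
submit_unlock_unpin_bo() loop msm actually uses:

	struct ww_acquire_ctx ticket;
	int ret;

	ww_acquire_init(&ticket, &reservation_ww_class);
retry:
	ret = ww_mutex_lock_interruptible(&obj->resv->lock, &ticket);
	if (ret == -EDEADLK) {
		unlock_all_held();	/* hypothetical: drop locks held so far */
		/* sleep until the contended lock is free, take it, then
		 * retry acquiring the rest of the set:
		 */
		ww_mutex_lock_slow(&contended->resv->lock, &ticket);
		goto retry;
	}
	ww_acquire_done(&ticket);

The ticket gives the whole acquire a global age, so of two racing
submits the younger backs off; that is the "seqno race" the comment
refers to.)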
> @@ -244,7 +244,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
>  		 * strange place to call it. OTOH this is a
>  		 * convenient can-fail point to hook it in.
>  		 */
> -		ret = reservation_object_reserve_shared(msm_obj->resv,
> +		ret = reservation_object_reserve_shared(msm_obj->base.resv,
> +							1);
>  		if (ret)
>  			return ret;
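
The ordering that comment is defending: _reserve_shared() is the
fallible half, done here at submit time, so the infallible half in
msm_gem_move_to_active() can never hit an allocation failure later.
Schematically:

	/* submit path: may fail, so reserve the shared-fence slot early */
	ret = reservation_object_reserve_shared(obj->resv, 1);
	if (ret)
		return ret;

	/* ... later, once the job's fence exists ... */

	/* active path: void, cannot fail, the slot is already there */
	reservation_object_add_shared_fence(obj->resv, fence);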
> --
> 2.19.1
>