[PATCH 2/3] drm/radeon: let sa manager block for fences to wait for
Tom Stellard
thomas.stellard at amd.com
Fri Jul 13 07:14:28 PDT 2012
On Fri, Jul 13, 2012 at 04:08:14PM +0200, Christian König wrote:
> Otherwise we can encounter out of memory situations under extreme load.
>
> Signed-off-by: Christian König <deathsimple at vodafone.de>
> ---
> drivers/gpu/drm/radeon/radeon.h | 2 +-
> drivers/gpu/drm/radeon/radeon_sa.c | 72 +++++++++++++++++++++++++-----------
> 2 files changed, 51 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
> index 6715e4c..2cb355b 100644
> --- a/drivers/gpu/drm/radeon/radeon.h
> +++ b/drivers/gpu/drm/radeon/radeon.h
> @@ -362,7 +362,7 @@ struct radeon_bo_list {
> * alignment).
> */
> struct radeon_sa_manager {
> - spinlock_t lock;
> + wait_queue_head_t wq;
> struct radeon_bo *bo;
> struct list_head *hole;
> struct list_head flist[RADEON_NUM_RINGS];
> diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
> index 81dbb5b..b535fc4 100644
> --- a/drivers/gpu/drm/radeon/radeon_sa.c
> +++ b/drivers/gpu/drm/radeon/radeon_sa.c
> @@ -54,7 +54,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
> {
> int i, r;
>
> - spin_lock_init(&sa_manager->lock);
> + init_waitqueue_head(&sa_manager->wq);
> sa_manager->bo = NULL;
> sa_manager->size = size;
> sa_manager->domain = domain;
> @@ -211,6 +211,29 @@ static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
> return false;
> }
>
> +static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
> + unsigned size, unsigned align)
> +{
> + unsigned soffset, eoffset, wasted;
> + int i;
> +
> + for (i = 0; i < RADEON_NUM_RINGS; ++i) {
> + if (!list_empty(&sa_manager->flist[i])) {
> + return true;
> + }
> + }
> +
> + soffset = radeon_sa_bo_hole_soffset(sa_manager);
> + eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
> + wasted = (align - (soffset % align)) % align;
> +
> + if ((eoffset - soffset) >= (size + wasted)) {
> + return true;
> + }
> +
> + return false;
> +}
> +
This new function should come with a comment, per the new documentation
rules.
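Something along these lines would do (the wording is only a suggestion,
based on what the function checks):

/**
 * radeon_sa_event - check if a blocked allocation can make progress
 *
 * @sa_manager: the sa_manager to check
 * @size: number of bytes we want to allocate
 * @align: alignment the allocation needs
 *
 * Returns true if either one of the per-ring free lists has entries
 * again (so there is a fence we can wait for), or the current hole is
 * already big enough to satisfy the request directly.  Used as the
 * wake-up condition for the wait queue in radeon_sa_bo_new().
 */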
> static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
> struct radeon_fence **fences,
> unsigned *tries)
> @@ -297,8 +320,8 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
> INIT_LIST_HEAD(&(*sa_bo)->olist);
> INIT_LIST_HEAD(&(*sa_bo)->flist);
>
> - spin_lock(&sa_manager->lock);
> - do {
> + spin_lock(&sa_manager->wq.lock);
> + while(1) {
> for (i = 0; i < RADEON_NUM_RINGS; ++i) {
> fences[i] = NULL;
> tries[i] = 0;
> @@ -309,30 +332,34 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
>
> if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
> size, align)) {
> - spin_unlock(&sa_manager->lock);
> + spin_unlock(&sa_manager->wq.lock);
> return 0;
> }
>
> /* see if we can skip over some allocations */
> } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
>
> - if (block) {
> - spin_unlock(&sa_manager->lock);
> - r = radeon_fence_wait_any(rdev, fences, false);
> - spin_lock(&sa_manager->lock);
> - if (r) {
> - /* if we have nothing to wait for we
> - are practically out of memory */
> - if (r == -ENOENT) {
> - r = -ENOMEM;
> - }
> - goto out_err;
> - }
> + if (!block) {
> + break;
> + }
> +
> + spin_unlock(&sa_manager->wq.lock);
> + r = radeon_fence_wait_any(rdev, fences, false);
> + spin_lock(&sa_manager->wq.lock);
> + /* if we have nothing to wait for block */
> + if (r == -ENOENT) {
> + r = wait_event_interruptible_locked(
> + sa_manager->wq,
> + radeon_sa_event(sa_manager, size, align)
> + );
> + }
> + if (r) {
> + goto out_err;
> }
> - } while (block);
> + };
>
> out_err:
> - spin_unlock(&sa_manager->lock);
> + spin_unlock(&sa_manager->wq.lock);
> kfree(*sa_bo);
> *sa_bo = NULL;
> return r;
> @@ -348,7 +375,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
> }
>
> sa_manager = (*sa_bo)->manager;
> - spin_lock(&sa_manager->lock);
> + spin_lock(&sa_manager->wq.lock);
> if (fence && !radeon_fence_signaled(fence)) {
> (*sa_bo)->fence = radeon_fence_ref(fence);
> list_add_tail(&(*sa_bo)->flist,
> @@ -356,7 +383,8 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
> } else {
> radeon_sa_bo_remove_locked(*sa_bo);
> }
> - spin_unlock(&sa_manager->lock);
> + wake_up_all_locked(&sa_manager->wq);
> + spin_unlock(&sa_manager->wq.lock);
> *sa_bo = NULL;
> }
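As a side note for anyone not familiar with the *_locked wait queue
helpers used here: both sides take wq.lock themselves,
wait_event_interruptible_locked() releases that lock while sleeping and
re-acquires it before returning, and wake_up_all_locked() expects the
lock to already be held.  A standalone sketch of the pattern (made-up
names, not radeon code):

#include <linux/wait.h>
#include <linux/spinlock.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_ready;

static int example_waiter(void)
{
	int r;

	spin_lock(&example_wq.lock);
	/* sleeps with example_wq.lock dropped, returns with it held */
	r = wait_event_interruptible_locked(example_wq, example_ready);
	spin_unlock(&example_wq.lock);
	return r;	/* 0 on success or -ERESTARTSYS */
}

static void example_waker(void)
{
	spin_lock(&example_wq.lock);
	example_ready = true;
	wake_up_all_locked(&example_wq);
	spin_unlock(&example_wq.lock);
}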
>
> @@ -366,7 +394,7 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
> {
> struct radeon_sa_bo *i;
>
> - spin_lock(&sa_manager->lock);
> + spin_lock(&sa_manager->wq.lock);
> list_for_each_entry(i, &sa_manager->olist, olist) {
> if (&i->olist == sa_manager->hole) {
> seq_printf(m, ">");
> @@ -381,6 +409,6 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
> }
> seq_printf(m, "\n");
> }
> - spin_unlock(&sa_manager->lock);
> + spin_unlock(&sa_manager->wq.lock);
> }
> #endif
> --
> 1.7.9.5
>
> _______________________________________________
> dri-devel mailing list
> dri-devel at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel