[RFC PATCH 06/18] drm/ttm: Add public buffer eviction/uneviction functions
Friedrich Vock
friedrich.vock at gmx.de
Wed Apr 24 16:56:56 UTC 2024
For now, they are only used internally inside TTM, but this will change
with the introduction of dynamic buffer priorities.
Signed-off-by: Friedrich Vock <friedrich.vock at gmx.de>
---
drivers/gpu/drm/ttm/ttm_bo.c | 168 ++++++++++++++++++++++++++++++++++-
include/drm/ttm/ttm_bo.h | 6 ++
2 files changed, 172 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3b89fabc2f00a..3047c763eb4eb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -166,6 +166,111 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
return ret;
}
/*
 * ttm_next_evicted_bo - fetch the next BO on @man's list of evicted BOs.
 *
 * @bdev: the TTM device (unused here; kept for symmetry with the other
 *        unevict helpers).
 * @man: resource manager whose ->evicted list is walked.
 * @cursor: previous list entry, or NULL to start from the head of the list.
 *
 * bdev->unevict_lock must be held when calling this function, so the list
 * cannot change while the next entry is picked.
 *
 * Returns the next BO with a reference taken via ttm_bo_get_unless_zero(),
 * or NULL when the end of the list is reached or the reference could not
 * be acquired.
 */
static struct ttm_buffer_object *ttm_next_evicted_bo(struct ttm_device *bdev,
						     struct ttm_resource_manager *man,
						     struct ttm_buffer_object *cursor)
{
	struct ttm_buffer_object *bo = NULL;

	/* Advance past @cursor, or begin at the head when no cursor is given. */
	if (cursor)
		cursor = list_next_entry(cursor, evicted);
	else
		cursor = list_first_entry(&man->evicted, struct ttm_buffer_object, evicted);

	/* Reaching the head sentinel means we walked off the end of the list. */
	if (!list_entry_is_head(cursor, &man->evicted, evicted))
		bo = ttm_bo_get_unless_zero(cursor);
	return bo;
}
+
/*
 * ttm_mem_unevict_evicted - attempt to move all of @man's evicted BOs back
 * into a preferred placement.
 *
 * @bdev: the TTM device.
 * @man: resource manager whose ->evicted list is processed.
 * @interruptible: whether waiting for a BO's reservation lock may be
 *                 interrupted by a signal.
 *
 * Walks man->evicted and calls ttm_bo_try_unevict() on each entry. The walk
 * stops early if a reservation lock cannot be taken; BOs whose uneviction
 * fails simply stay on the list.
 */
void ttm_mem_unevict_evicted(struct ttm_device *bdev,
			     struct ttm_resource_manager *man,
			     bool interruptible)
{
	struct ttm_buffer_object *evicted_bo = NULL, *next_evicted_bo = NULL;
	struct ttm_operation_ctx ctx;
	int ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.interruptible = interruptible;
	/* Don't evict other buffers in order to make room for this one. */
	ctx.no_evict = true;

	/* Grab the first candidate; ttm_next_evicted_bo() takes a reference. */
	spin_lock(&bdev->unevict_lock);
	evicted_bo = ttm_next_evicted_bo(bdev, man, NULL);
	spin_unlock(&bdev->unevict_lock);

	while (evicted_bo) {
		if (interruptible)
			ret = dma_resv_lock_interruptible(
				evicted_bo->base.resv, NULL);
		else
			ret = dma_resv_lock(evicted_bo->base.resv,
					    NULL);
		if (ret) {
			/* Could not reserve: drop our reference and give up. */
			ttm_bo_put(evicted_bo);
			break;
		}

		/* If we raced with another thread (and lost), the
		 * other thread already removed the buffer from the
		 * list. In that case, we need to start over because
		 * our current cursor got removed.
		 */
		if (evicted_bo->evicted_type == TTM_NUM_MEM_TYPES)
			ret = 0;
		else
			ret = ttm_bo_try_unevict(evicted_bo, &ctx);

		/*
		 * ret == 0 means the BO was unlinked from the list (either by
		 * us or by the thread we raced with), so restart from the head
		 * (cursor == NULL). On failure the BO is still linked and
		 * serves as the cursor for the next entry.
		 */
		next_evicted_bo = ret ? evicted_bo : NULL;

		/*
		 * Pick the next entry before dropping this BO's reservation:
		 * as long as we hold it, the cursor cannot be unlinked
		 * underneath us.
		 */
		spin_lock(&bdev->unevict_lock);
		next_evicted_bo = ttm_next_evicted_bo(bdev, man,
						      next_evicted_bo);
		spin_unlock(&bdev->unevict_lock);

		dma_resv_unlock(evicted_bo->base.resv);
		ttm_bo_put(evicted_bo);

		evicted_bo = next_evicted_bo;
	}
}
EXPORT_SYMBOL(ttm_mem_unevict_evicted);
+
/*
 * Deferred-work container used to run uneviction of a manager's evicted
 * BOs from a workqueue (see ttm_mem_queue_unevict()). One instance is
 * allocated per queued request.
 */
struct ttm_mem_unevict_work {
	struct work_struct work;		/* embedded work item */
	struct ttm_device *bdev;		/* device owning @man */
	struct ttm_resource_manager *man;	/* manager whose evicted list to process */
};
+
+static void ttm_mem_unevict_work(struct work_struct *work)
+{
+ struct ttm_mem_unevict_work *unevict_work;
+
+ unevict_work = container_of(work, typeof(*unevict_work), work);
+
+ ttm_mem_unevict_evicted(unevict_work->bdev, unevict_work->man,
+ false);
+}
+
+static void ttm_mem_queue_unevict(struct ttm_device *bdev,
+ struct ttm_resource_manager *man)
+{
+ struct ttm_mem_unevict_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, ttm_mem_unevict_work);
+ work->bdev = bdev;
+ work->man = man;
+ queue_work_node(bdev->pool.nid, bdev->wq, &work->work);
+}
+
/*
* Call bo::reserved.
* Will release GPU memory type usage on destruction.
@@ -176,6 +281,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
+ struct ttm_resource_manager *man = NULL;
+ struct ttm_device *bdev = bo->bdev;
+
+ if (bo->resource)
+ man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
+
if (bo->bdev->funcs->delete_mem_notify)
bo->bdev->funcs->delete_mem_notify(bo);
if (bo->evicted_type != TTM_NUM_MEM_TYPES) {
@@ -187,6 +298,9 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_bo_tt_destroy(bo);
ttm_resource_free(bo, &bo->resource);
+
+ if (man)
+ ttm_mem_queue_unevict(bdev, man);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -432,8 +546,7 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
return 0;
}
-static int ttm_bo_evict(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx)
+int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
int evicted_type = bo->resource->mem_type;
struct ttm_device *bdev = bo->bdev;
@@ -499,6 +612,57 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
return ret;
}
/*
 * ttm_bo_try_unevict - attempt to move an evicted BO back into a preferred
 * placement.
 *
 * @bo: the evicted buffer object; bo->evicted_type records the memory type
 *      it was evicted from.
 * @ctx: operation context for the move.
 *
 * The driver's unevict_flags() callback chooses the target placements. On
 * success the BO is unlinked from the manager's evicted list, the manager's
 * evicted-bytes accounting is reduced, and bo->evicted_type is reset to
 * TTM_NUM_MEM_TYPES ("not evicted"). On failure the BO stays on the list.
 *
 * Caller must hold bo->base.resv.
 *
 * Returns 0 on success, -ENOSPC if the driver supplied no placements, or a
 * negative error code from finding or moving to the new resource.
 *
 * NOTE(review): assumes bo->evicted_type indexes a valid manager (i.e. the
 * BO really was evicted) — confirm at the call sites.
 */
int ttm_bo_try_unevict(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx)
{
	struct ttm_resource_manager *man;
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *unevict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	dma_resv_assert_held(bo->base.resv);

	man = ttm_manager_type(bdev, bo->evicted_type);

	/* A deleted BO has no data to move; just delist it below (ret == 0). */
	if (bo->deleted)
		goto out;

	/*
	 * Only the counts are cleared here; the placement arrays themselves
	 * are expected to be filled in by the driver callback.
	 */
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->funcs->unevict_flags(bo, &placement);

	/* The driver declined to provide any target placement. */
	if (!placement.num_placement && !placement.num_busy_placement)
		return -ENOSPC;

	ret = ttm_bo_mem_space(bo, &placement, &unevict_mem, ctx);
	if (ret)
		return ret;

	/* Move, bouncing through temporary placements on -EMULTIHOP. */
	do {
		ret = ttm_bo_handle_move_mem(bo, unevict_mem, true, ctx, &hop);
		if (ret != -EMULTIHOP)
			break;

		ret = ttm_bo_bounce_temp_buffer(bo, &unevict_mem, ctx, &hop);
	} while (!ret);

	if (ret)
		ttm_resource_free(bo, &unevict_mem);

out:
	if (!ret) {
		/* Delist under unevict_lock and fix up the accounting. */
		spin_lock(&bdev->unevict_lock);
		list_del_init(&bo->evicted);
		man->evicted_bytes -= bo->base.size;
		spin_unlock(&bdev->unevict_lock);
		/* Mark the BO as no longer evicted. */
		bo->evicted_type = TTM_NUM_MEM_TYPES;
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_try_unevict);
+
/**
* ttm_bo_eviction_valuable
*
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index a8f21092403d6..8f4e6366c0417 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -370,6 +370,9 @@ void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
+int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);
+int ttm_bo_try_unevict(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
enum ttm_bo_type type, struct ttm_placement *placement,
uint32_t alignment, struct ttm_operation_ctx *ctx,
@@ -395,6 +398,9 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket);
+void ttm_mem_unevict_evicted(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ bool interruptible);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
--
2.44.0
More information about the amd-gfx
mailing list