[CI 1/3] drm/ttm: Allow TTM LRU list nodes of different types
Thomas Hellström
thomas.hellstrom at linux.intel.com
Tue Feb 6 12:17:14 UTC 2024
To be able to handle list unlocking while traversing the LRU
list, we want the iterators not only to point to the next
position of the list traversal, but to insert themselves as
list nodes at that point, to work around the fact that the next
node might otherwise disappear from the list while the iterator
is pointing to it.
These list nodes need to be easily distinguishable from other
list nodes so that others traversing the list can skip
over them.
So declare a struct ttm_lru_item, with a struct list_head member
and a type enum. This will slightly increase the size of a
struct ttm_resource.
Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
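For context, the consumer-side pattern this enables looks roughly
like the following: container_of() is only applied after checking the
node type, so hitches parked by iterators are skipped rather than
dereferenced. This is an illustrative sketch against the helpers
added below, not code from this patch, and the function name is made
up:

/* Return the first real resource on an LRU list, or NULL. */
static struct ttm_resource *first_res_or_null(struct list_head *head)
{
	struct ttm_lru_item *lru;

	list_for_each_entry(lru, head, link) {
		/* Skip hitch nodes parked by other iterators. */
		if (ttm_lru_item_is_res(lru))
			return ttm_lru_item_to_res(lru);
	}

	return NULL;
}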
drivers/gpu/drm/ttm/ttm_device.c | 13 ++++--
drivers/gpu/drm/ttm/ttm_resource.c | 68 +++++++++++++++++++++---------
include/drm/ttm/ttm_resource.h | 30 ++++++++++++-
3 files changed, 87 insertions(+), 24 deletions(-)
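The actual hitch insertion arrives with the follow-up patches; as a
rough sketch of why the type tag is needed, an iterator that wants to
drop the lru_lock could park and resume along these lines
(hypothetical helpers, not part of this patch as posted):

/* Park a hitch after @res so the LRU lock can be dropped without
 * losing the traversal position; neighbours may be freed meanwhile.
 */
static void lru_cursor_park(struct ttm_lru_item *hitch,
			    struct ttm_resource *res)
{
	ttm_lru_item_init(hitch, TTM_LRU_HITCH);
	list_add(&hitch->link, &res->lru.link);
}

/* Resume: advance from the hitch to the next real resource, skipping
 * other cursors' hitches, then unlink our own hitch.
 */
static struct ttm_resource *lru_cursor_resume(struct ttm_lru_item *hitch,
					      struct list_head *head)
{
	struct ttm_lru_item *lru = hitch;
	struct ttm_resource *res = NULL;

	list_for_each_entry_continue(lru, head, link) {
		if (ttm_lru_item_is_res(lru)) {
			res = ttm_lru_item_to_res(lru);
			break;
		}
	}
	list_del_init(&hitch->link);

	return res;
}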
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 76027960054f..f27406e851e5 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -270,17 +270,22 @@ EXPORT_SYMBOL(ttm_device_fini);
static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
struct list_head *list)
{
- struct ttm_resource *res;
+ struct ttm_lru_item *lru;
spin_lock(&bdev->lru_lock);
- while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
- struct ttm_buffer_object *bo = res->bo;
+ while ((lru = list_first_entry_or_null(list, typeof(*lru), link))) {
+ struct ttm_buffer_object *bo;
+
+ /*
+ * Skip over hitch nodes inserted by iterators; a plain
+ * "continue" here would refetch the same entry forever.
+ */
+ while (&lru->link != list && !ttm_lru_item_is_res(lru))
+ lru = list_next_entry(lru, link);
+
+ if (&lru->link == list)
+ break;
+
+ bo = ttm_lru_item_to_res(lru)->bo;
/* Take ref against racing releases once lru_lock is unlocked */
if (!ttm_bo_get_unless_zero(bo))
continue;
- list_del_init(&res->lru);
+ list_del_init(&bo->resource->lru.link);
spin_unlock(&bdev->lru_lock);
if (bo->ttm)
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index fb14f7716cf8..a347130570ee 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -69,8 +69,8 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->last->bo->base.resv);
man = ttm_manager_type(pos->first->bo->bdev, i);
- list_bulk_move_tail(&man->lru[j], &pos->first->lru,
- &pos->last->lru);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
+ &pos->last->lru.link);
}
}
}
@@ -83,14 +83,36 @@ ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
return &bulk->pos[res->mem_type][res->bo->priority];
}
+static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_prev_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
+static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_next_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
if (pos->first == res)
- pos->first = list_next_entry(res, lru);
- list_move(&res->lru, &pos->last->lru);
+ pos->first = ttm_lru_next_res(res);
+ list_move(&res->lru.link, &pos->last->lru.link);
pos->last = res;
}
}
@@ -120,11 +142,11 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {
- pos->first = list_next_entry(res, lru);
+ pos->first = ttm_lru_next_res(res);
} else if (pos->last == res) {
- pos->last = list_prev_entry(res, lru);
+ pos->last = ttm_lru_prev_res(res);
} else {
- list_move(&res->lru, &pos->last->lru);
+ list_move(&res->lru.link, &pos->last->lru.link);
}
}
@@ -153,7 +175,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
if (bo->pin_count) {
- list_move_tail(&res->lru, &bdev->pinned);
+ list_move_tail(&res->lru.link, &bdev->pinned);
} else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
@@ -164,7 +186,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
struct ttm_resource_manager *man;
man = ttm_manager_type(bdev, res->mem_type);
- list_move_tail(&res->lru, &man->lru[bo->priority]);
+ list_move_tail(&res->lru.link, &man->lru[bo->priority]);
}
}
@@ -195,9 +217,9 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
if (bo->pin_count)
- list_add_tail(&res->lru, &bo->bdev->pinned);
+ list_add_tail(&res->lru.link, &bo->bdev->pinned);
else
- list_add_tail(&res->lru, &man->lru[bo->priority]);
+ list_add_tail(&res->lru.link, &man->lru[bo->priority]);
man->usage += res->size;
spin_unlock(&bo->bdev->lru_lock);
}
@@ -219,7 +241,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_device *bdev = man->bdev;
spin_lock(&bdev->lru_lock);
- list_del_init(&res->lru);
+ list_del_init(&res->lru.link);
man->usage -= res->size;
spin_unlock(&bdev->lru_lock);
}
@@ -462,14 +484,16 @@ struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor)
{
- struct ttm_resource *res;
+ struct ttm_lru_item *lru;
lockdep_assert_held(&man->bdev->lru_lock);
for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ list_for_each_entry(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
return NULL;
}
@@ -488,15 +512,21 @@ ttm_resource_manager_next(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor,
struct ttm_resource *res)
{
+ struct ttm_lru_item *lru = &res->lru;
+
lockdep_assert_held(&man->bdev->lru_lock);
- list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
- return res;
+ list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ list_for_each_entry(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
return NULL;
}
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 1afa13f0c22b..eee6c20c3d1c 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -49,6 +49,28 @@ struct io_mapping;
struct sg_table;
struct scatterlist;
+enum ttm_lru_item_type {
+ TTM_LRU_RESOURCE,
+ TTM_LRU_HITCH
+};
+
+struct ttm_lru_item {
+ struct list_head link;
+ enum ttm_lru_item_type type;
+};
+
+static inline void ttm_lru_item_init(struct ttm_lru_item *item,
+ enum ttm_lru_item_type type)
+{
+ item->type = type;
+ INIT_LIST_HEAD(&item->link);
+}
+
+static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
+{
+ return item->type == TTM_LRU_RESOURCE;
+}
+
struct ttm_resource_manager_func {
/**
* struct ttm_resource_manager_func member alloc
@@ -217,9 +239,15 @@ struct ttm_resource {
/**
* @lru: Least recently used list, see &ttm_resource_manager.lru
*/
- struct list_head lru;
+ struct ttm_lru_item lru;
};
+static inline struct ttm_resource *
+ttm_lru_item_to_res(struct ttm_lru_item *item)
+{
+ return container_of(item, struct ttm_resource, lru);
+}
+
/**
* struct ttm_resource_cursor
*
--
2.43.0