[PATCH 8/9] drm/ttm: allow bulk moves for all domains
Christian König
ckoenig.leichtzumerken@gmail.com
Wed Feb 9 08:40:58 UTC 2022
Allow bulk moves on the LRU for all memory domains, not just TT and VRAM.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Tested-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
---
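For illustration only (not part of the patch): the tt[] and vram[] position
arrays are folded into a single two-dimensional pos[][] table indexed by
memory type and priority, so the switch on res->mem_type goes away. A minimal
sketch of the new indexing, using only names introduced by the hunks below;
the helper function itself is hypothetical:

    /* Hypothetical helper, for illustration; needs <drm/ttm/ttm_resource.h>. */
    static struct ttm_lru_bulk_move_pos *
    example_bulk_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res,
                     unsigned int priority)
    {
            /*
             * Before: &bulk->tt[priority] or &bulk->vram[priority], chosen by
             * a switch on res->mem_type, so only TT and VRAM could be bulk
             * moved.  After: one table covering every memory type.
             */
            return &bulk->pos[res->mem_type][priority];
    }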
 drivers/gpu/drm/ttm/ttm_resource.c | 52 +++++++++---------------------
 include/drm/ttm/ttm_device.h       |  2 --
 include/drm/ttm/ttm_resource.h     |  4 +--
3 files changed, 17 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index e3301f6277ba..5e732a509b4b 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -51,38 +51,24 @@ EXPORT_SYMBOL(ttm_lru_bulk_move_init);
  */
 void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
 {
-        unsigned i;
-
-        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-                struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
-                struct ttm_resource_manager *man;
+        unsigned i, j;
 
-                if (!pos->first)
-                        continue;
+        for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
+                for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+                        struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
+                        struct ttm_resource_manager *man;
 
-                lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
-                dma_resv_assert_held(pos->first->bo->base.resv);
-                dma_resv_assert_held(pos->last->bo->base.resv);
+                        if (!pos->first)
+                                continue;
 
-                man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_TT);
-                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
-                                    &pos->last->lru);
-        }
-
-        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-                struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
-                struct ttm_resource_manager *man;
+                        lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
+                        dma_resv_assert_held(pos->first->bo->base.resv);
+                        dma_resv_assert_held(pos->last->bo->base.resv);
 
-                if (!pos->first)
-                        continue;
-
-                lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
-                dma_resv_assert_held(pos->first->bo->base.resv);
-                dma_resv_assert_held(pos->last->bo->base.resv);
-
-                man = ttm_manager_type(pos->first->bo->bdev, TTM_PL_VRAM);
-                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
-                                    &pos->last->lru);
+                        man = ttm_manager_type(pos->first->bo->bdev, i);
+                        list_bulk_move_tail(&man->lru[j], &pos->first->lru,
+                                            &pos->last->lru);
+                }
         }
 }
 EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
@@ -122,15 +108,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res,
         if (!bulk)
                 return;
 
-        switch (res->mem_type) {
-        case TTM_PL_TT:
-                ttm_lru_bulk_move_set_pos(&bulk->tt[bo->priority], res);
-                break;
-
-        case TTM_PL_VRAM:
-                ttm_lru_bulk_move_set_pos(&bulk->vram[bo->priority], res);
-                break;
-        }
+        ttm_lru_bulk_move_set_pos(&bulk->pos[res->mem_type][bo->priority], res);
 }
 
 /**
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 0a4ddec78d8f..425150f35fbe 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -30,8 +30,6 @@
 #include <drm/ttm/ttm_resource.h>
 #include <drm/ttm/ttm_pool.h>
 
-#define TTM_NUM_MEM_TYPES 8
-
 struct ttm_device;
 struct ttm_placement;
 struct ttm_buffer_object;
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index cc452a7aa016..5f93a16acfd6 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -37,6 +37,7 @@
 #include <drm/ttm/ttm_kmap_iter.h>
 
 #define TTM_MAX_BO_PRIORITY 4U
+#define TTM_NUM_MEM_TYPES 8
 
 struct ttm_device;
 struct ttm_resource_manager;
@@ -217,8 +218,7 @@ struct ttm_lru_bulk_move_pos {
  * Helper structure for bulk moves on the LRU list.
  */
 struct ttm_lru_bulk_move {
-        struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
-        struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
+        struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
 };
 
 /**
--
2.25.1
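For context, a rough sketch of how a driver batches LRU updates with this
structure, assuming only the interfaces visible above (ttm_lru_bulk_move_init()
from the first hunk header, ttm_resource_move_to_lru_tail() with its bulk
argument, ttm_lru_bulk_move_tail()); "res" stands for whatever resource the
driver is updating, and exact call sites differ per driver:

    /* Sketch: batch LRU tail moves for resources in any memory domain. */
    struct ttm_lru_bulk_move bulk;
    struct ttm_resource *res;   /* each resource the driver is touching */

    ttm_lru_bulk_move_init(&bulk);

    /*
     * With bdev->lru_lock held and each BO's reservation held (see the
     * lockdep/dma_resv asserts above), record the resources; the position
     * is now tracked per (mem_type, priority), so any domain qualifies,
     * not only TT or VRAM.
     */
    ttm_resource_move_to_lru_tail(res, &bulk);

    /* Move every recorded range to its LRU tail in one bulk operation. */
    ttm_lru_bulk_move_tail(&bulk);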