[PATCH 08/13] drm/qxl: switch to using drm_exec
Christian König
ckoenig.leichtzumerken at gmail.com
Thu May 4 11:51:54 UTC 2023
Just a straightforward conversion without any optimization.
Only compile-tested for now.
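
The ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation() pair is replaced
with the drm_exec locking loop. Roughly, the pattern applied below looks
like this (a sketch only, using the drm_exec API as posted in this series;
the qxl structure and field names are the ones touched in the diff):

	drm_exec_init(&release->exec, !no_intr);
	drm_exec_while_not_all_locked(&release->exec) {
		list_for_each_entry(entry, &release->bos, list) {
			/* lock the GEM object and reserve one fence slot */
			ret = drm_exec_prepare_obj(&release->exec,
						   &entry->bo->tbo.base, 1);
			/* on contention drop all locks and restart the loop */
			drm_exec_break_on_contention(&release->exec);
			if (ret)
				goto error;
		}
	}

	/* ... validate and use the locked BOs ... */

	/* drop all locks again, on the error path and after fencing */
	drm_exec_fini(&release->exec);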
Signed-off-by: Christian König <christian.koenig at amd.com>
---
drivers/gpu/drm/qxl/Kconfig | 1 +
drivers/gpu/drm/qxl/qxl_drv.h | 7 ++--
drivers/gpu/drm/qxl/qxl_release.c | 67 ++++++++++++++++---------------
3 files changed, 39 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index ca3f51c2a8fe..9c8e433be33e 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -5,6 +5,7 @@ config DRM_QXL
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select DRM_EXEC
select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index ea993d7162e8..3e732648b332 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,12 +38,12 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_dev.h"
@@ -101,7 +101,8 @@ struct qxl_gem {
};
struct qxl_bo_list {
- struct ttm_validate_buffer tv;
+ struct qxl_bo *bo;
+ struct list_head list;
};
struct qxl_crtc {
@@ -151,7 +152,7 @@ struct qxl_release {
struct qxl_bo *release_bo;
uint32_t release_offset;
uint32_t surface_release_id;
- struct ww_acquire_ctx ticket;
+ struct drm_exec exec;
struct list_head bos;
};
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 368d26da0d6a..da7cd9cd58f9 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -121,13 +121,11 @@ qxl_release_free_list(struct qxl_release *release)
{
while (!list_empty(&release->bos)) {
struct qxl_bo_list *entry;
- struct qxl_bo *bo;
entry = container_of(release->bos.next,
- struct qxl_bo_list, tv.head);
- bo = to_qxl_bo(entry->tv.bo);
- qxl_bo_unref(&bo);
- list_del(&entry->tv.head);
+ struct qxl_bo_list, list);
+ qxl_bo_unref(&entry->bo);
+ list_del(&entry->list);
kfree(entry);
}
release->release_bo = NULL;
@@ -172,8 +170,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
struct qxl_bo_list *entry;
- list_for_each_entry(entry, &release->bos, tv.head) {
- if (entry->tv.bo == &bo->tbo)
+ list_for_each_entry(entry, &release->bos, list) {
+ if (entry->bo == bo)
return 0;
}
@@ -182,9 +180,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
return -ENOMEM;
qxl_bo_ref(bo);
- entry->tv.bo = &bo->tbo;
- entry->tv.num_shared = 0;
- list_add_tail(&entry->tv.head, &release->bos);
+ entry->bo = bo;
+ list_add_tail(&entry->list, &release->bos);
return 0;
}
@@ -221,21 +218,27 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos))
return 0;
- ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL);
- if (ret)
- return ret;
-
- list_for_each_entry(entry, &release->bos, tv.head) {
- struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
-
- ret = qxl_release_validate_bo(bo);
- if (ret) {
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
- return ret;
+ drm_exec_init(&release->exec, !no_intr);
+ drm_exec_while_not_all_locked(&release->exec) {
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = drm_exec_prepare_obj(&release->exec,
+ &entry->bo->tbo.base,
+ 1);
+ drm_exec_break_on_contention(&release->exec);
+ if (ret)
+ goto error;
}
}
+
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = qxl_release_validate_bo(entry->bo);
+ if (ret)
+ goto error;
+ }
return 0;
+error:
+ drm_exec_fini(&release->exec);
+ return ret;
}
void qxl_release_backoff_reserve_list(struct qxl_release *release)
@@ -245,7 +248,7 @@ void qxl_release_backoff_reserve_list(struct qxl_release *release)
if (list_is_singular(&release->bos))
return;
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+ drm_exec_fini(&release->exec);
}
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
@@ -404,18 +407,18 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
- struct ttm_buffer_object *bo;
struct ttm_device *bdev;
- struct ttm_validate_buffer *entry;
+ struct qxl_bo_list *entry;
struct qxl_device *qdev;
+ struct qxl_bo *bo;
/* if only one object on the release its the release itself
since these objects are pinned no need to reserve */
if (list_is_singular(&release->bos) || list_empty(&release->bos))
return;
- bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
+ bo = list_first_entry(&release->bos, struct qxl_bo_list, list)->bo;
+ bdev = bo->tbo.bdev;
qdev = container_of(bdev, struct qxl_device, mman.bdev);
/*
@@ -426,14 +429,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- list_for_each_entry(entry, &release->bos, head) {
+ list_for_each_entry(entry, &release->bos, list) {
bo = entry->bo;
- dma_resv_add_fence(bo->base.resv, &release->base,
+ dma_resv_add_fence(bo->tbo.base.resv, &release->base,
DMA_RESV_USAGE_READ);
- ttm_bo_move_to_lru_tail_unlocked(bo);
- dma_resv_unlock(bo->base.resv);
+ ttm_bo_move_to_lru_tail_unlocked(&bo->tbo);
}
- ww_acquire_fini(&release->ticket);
+ drm_exec_fini(&release->exec);
}
-
--
2.34.1