[PATCH 13/13] drm: remove drm_gem_(un)lock_reservations
Christian König
ckoenig.leichtzumerken at gmail.com
Thu May 4 11:51:59 UTC 2023
Not used any more; all remaining users were converted to the drm_exec helper earlier in this series.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
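For reference, the equivalent of the removed ww_mutex retry dance with the
drm_exec helper looks roughly like the sketch below. This is against the
drm_exec API as it later landed in mainline (drm_exec_init() grew an initial
object-count argument there), so details may differ from this revision of the
series, and lock_objs_sketch() is just an illustrative name:

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

/*
 * Sketch only: lock an array of GEM objects with drm_exec, restarting
 * on ww_mutex contention just like the removed helper did.  On error
 * the caller is still expected to call drm_exec_fini().
 */
static int lock_objs_sketch(struct drm_exec *exec,
			    struct drm_gem_object **objs, int count)
{
	int i, ret;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, count);
	drm_exec_until_all_locked(exec) {
		for (i = 0; i < count; i++) {
			/* Locks the object and reserves one fence slot. */
			ret = drm_exec_prepare_obj(exec, objs[i], 1);
			drm_exec_retry_on_contention(exec);
			if (ret)
				return ret;
		}
	}
	return 0;
}

Unlike drm_gem_lock_reservations(), drm_exec keeps the acquire context and
the list of locked objects together, so unlock is a single drm_exec_fini().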
drivers/gpu/drm/drm_gem.c | 78 --------------------------
drivers/gpu/drm/scheduler/sched_main.c | 5 +-
include/drm/drm_gem.h | 4 --
3 files changed, 2 insertions(+), 85 deletions(-)
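The sched_main.c hunk below only reworps the kerneldoc wording, but the
ordering it describes is worth spelling out: lock first, gather implicit
dependencies, then publish your own fence. A hedged sketch of a submit path
(submit_one_bo() is hypothetical; the scheduler and dma_resv calls are real,
but error handling and job setup are abbreviated):

#include <drm/drm_exec.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-resv.h>

static int submit_one_bo(struct drm_sched_job *job, struct drm_gem_object *obj)
{
	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 1);
	drm_exec_until_all_locked(&exec) {
		/* Lock the object and reserve a slot for our fence. */
		ret = drm_exec_prepare_obj(&exec, obj, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto out;
	}

	/* Object is locked: pull in implicit dependencies first ... */
	ret = drm_sched_job_add_implicit_dependencies(job, obj, true);
	if (ret)
		goto out;

	/* ... then publish the job's own fence to the reservation. */
	drm_sched_job_arm(job);
	dma_resv_add_fence(obj->resv, &job->s_fence->finished,
			   DMA_RESV_USAGE_WRITE);
	drm_sched_entity_push_job(job);
out:
	drm_exec_fini(&exec);
	return ret;
}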
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 1a5a2cd0d4ec..6666cd411002 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1214,84 +1214,6 @@ void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
-/**
- * drm_gem_lock_reservations - Sets up the ww context and acquires
- * the lock on an array of GEM objects.
- *
- * Once you've locked your reservations, you'll want to set up space
- * for your shared fences (if applicable), submit your job, then
- * drm_gem_unlock_reservations().
- *
- * @objs: drm_gem_objects to lock
- * @count: Number of objects in @objs
- * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
- * part of tracking this set of locked reservations.
- */
-int
-drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int contended = -1;
- int i, ret;
-
- ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
- if (contended != -1) {
- struct drm_gem_object *obj = objs[contended];
-
- ret = dma_resv_lock_slow_interruptible(obj->resv,
- acquire_ctx);
- if (ret) {
- ww_acquire_fini(acquire_ctx);
- return ret;
- }
- }
-
- for (i = 0; i < count; i++) {
- if (i == contended)
- continue;
-
- ret = dma_resv_lock_interruptible(objs[i]->resv,
- acquire_ctx);
- if (ret) {
- int j;
-
- for (j = 0; j < i; j++)
- dma_resv_unlock(objs[j]->resv);
-
- if (contended != -1 && contended >= i)
- dma_resv_unlock(objs[contended]->resv);
-
- if (ret == -EDEADLK) {
- contended = i;
- goto retry;
- }
-
- ww_acquire_fini(acquire_ctx);
- return ret;
- }
- }
-
- ww_acquire_done(acquire_ctx);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_gem_lock_reservations);
-
-void
-drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int i;
-
- for (i = 0; i < count; i++)
- dma_resv_unlock(objs[i]->resv);
-
- ww_acquire_fini(acquire_ctx);
-}
-EXPORT_SYMBOL(drm_gem_unlock_reservations);
-
/**
* drm_gem_lru_init - initialize a LRU
*
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b09cdacfd062..2d8249148926 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -794,9 +794,8 @@ EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
* @write: whether the job might write the object (so we need to depend on
* shared fences in the reservation object).
*
- * This should be called after drm_gem_lock_reservations() on your array of
- * GEM objects used in the job but before updating the reservations with your
- * own fences.
+ * This should be called after locking your GEM objects used in the job but
+ * before updating the reservations with your own fences.
*
* Returns:
* 0 on success, or an error on failing to expand the array.
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index b8efd836edef..7e027688a83d 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -476,10 +476,6 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout);
-int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
- struct ww_acquire_ctx *acquire_ctx);
-void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
- struct ww_acquire_ctx *acquire_ctx);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
--
2.34.1