[PATCH 3/4] drm/amdgpu: use new scheduler accounting
Lucas Stach
l.stach at pengutronix.de
Mon Jul 1 17:14:46 UTC 2024
From: Christian König <ckoenig.leichtzumerken at gmail.com>
Instead of implementing this ourselves.
Signed-off-by: Christian König <christian.koenig at amd.com>
Signed-off-by: Lucas Stach <l.stach at pengutronix.de>
---
v2:
- rebased to v6.10-rc1
- adapted to match new function names
---
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 52 ++++---------------------
1 file changed, 8 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 56f2428813e8..392f51e0b2e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -166,41 +166,6 @@ static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
return hw_prio;
}
-/* Calculate the time spend on the hw */
-static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
-{
- struct drm_sched_fence *s_fence;
-
- if (!fence)
- return ns_to_ktime(0);
-
- /* When the fence is not even scheduled it can't have spend time */
- s_fence = to_drm_sched_fence(fence);
- if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
- return ns_to_ktime(0);
-
- /* When it is still running account how much already spend */
- if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
- return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
-
- return ktime_sub(s_fence->finished.timestamp,
- s_fence->scheduled.timestamp);
-}
-
-static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
- struct amdgpu_ctx_entity *centity)
-{
- ktime_t res = ns_to_ktime(0);
- uint32_t i;
-
- spin_lock(&ctx->ring_lock);
- for (i = 0; i < amdgpu_sched_jobs; i++) {
- res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
- }
- spin_unlock(&ctx->ring_lock);
- return res;
-}
-
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
const u32 ring)
{
@@ -272,18 +237,17 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity)
{
- ktime_t res = ns_to_ktime(0);
+ ktime_t res;
int i;
if (!entity)
- return res;
+ return ns_to_ktime(0);
- for (i = 0; i < amdgpu_sched_jobs; ++i) {
- res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
+ for (i = 0; i < amdgpu_sched_jobs; ++i)
dma_fence_put(entity->fences[i]);
- }
amdgpu_xcp_release_sched(adev, entity);
+ res = drm_sched_entity_time_spent(&entity->entity);
drm_sched_entity_destroy(&entity->entity);
kfree(entity);
return res;
@@ -748,9 +712,6 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
centity->sequence++;
spin_unlock(&ctx->ring_lock);
- atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
- &ctx->mgr->time_spend[centity->hw_ip]);
-
dma_fence_put(other);
return seq;
}
@@ -930,12 +891,15 @@ void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
struct amdgpu_ctx_entity *centity;
+ struct drm_sched_entity *entity;
ktime_t spend;
centity = ctx->entities[hw_ip][i];
if (!centity)
continue;
- spend = amdgpu_ctx_entity_time(ctx, centity);
+
+ entity = &centity->entity;
+ spend = drm_sched_entity_time_spent(entity);
usage[hw_ip] = ktime_add(usage[hw_ip], spend);
}
}
--
2.39.2
More information about the amd-gfx
mailing list