[PATCH 02/36] drm/amdgpu: remove job parameter from amdgpu_fence_emit()
Alex Deucher
alexdeucher at gmail.com
Tue Jun 17 13:46:57 UTC 2025
On Tue, Jun 17, 2025 at 7:57 AM Christian König
<christian.koenig at amd.com> wrote:
>
> On 6/17/25 05:07, Alex Deucher wrote:
> > What we actually care about is the amdgpu_fence object
> > so pass that in explicitly to avoid possible mistakes
> > in the future.
> >
> > The job_run_counter handling can be safely removed at this
> > point as we no longer support job resubmission.
> >
> > Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 36 +++++++++--------------
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c    |  5 +++-
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  |  4 +--
> >  3 files changed, 20 insertions(+), 25 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> > index 569e0e5373927..e88848c14491a 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> > @@ -114,14 +114,14 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
> >   *
> >   * @ring: ring the fence is associated with
> >   * @f: resulting fence object
> > - * @job: job the fence is embedded in
> > + * @af: amdgpu fence input
> >   * @flags: flags to pass into the subordinate .emit_fence() call
> >   *
> >   * Emits a fence command on the requested ring (all asics).
> >   * Returns 0 on success, -ENOMEM on failure.
> >   */
> > -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
> > -		      unsigned int flags)
> > +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
> > +		      struct amdgpu_fence *af, unsigned int flags)
> >  {
> >  	struct amdgpu_device *adev = ring->adev;
> >  	struct dma_fence *fence;
> > @@ -130,36 +130,28 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
> >  	uint32_t seq;
> >  	int r;
> > 
> > -	if (job == NULL) {
> > -		/* create a sperate hw fence */
> > +	if (!af) {
> > +		/* create a separate hw fence */
> >  		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
> >  		if (am_fence == NULL)
> >  			return -ENOMEM;
>
> I think we should remove the output parameter as well.
>
> An amdgpu_fence can be trivially allocated by the caller.
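Presumably the call sites would then end up with something like this
(untested sketch; it also assumes amdgpu_fence_ops' .release moves from
the slab free to a plain kfree() so callers can allocate with kzalloc()):

	struct amdgpu_fence *af;

	if (job) {
		af = &job->hw_fence;
	} else {
		/* standalone fence, allocated by the caller now */
		af = kzalloc(sizeof(*af), GFP_ATOMIC);
		if (!af)
			return -ENOMEM;
	}

	/* no output parameter any more, the fence is just &af->base */
	r = amdgpu_fence_emit(ring, af, fence_flags);
	if (r)
		return r;	/* error unwinding elided in this sketch */

	*f = &af->base;
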
Is there anything special about amdgpu_job_fence_ops vs
amdgpu_fence_ops other than the slab handling? I was worried I was
missing something about the fence lifetimes with amdgpu_job_{free,
free_cb}.
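For reference, the only real difference I can see is the free path
behind .release (quoting from memory, so this may not be exact):

	/* standalone fence: memory goes back to the fence slab */
	static void amdgpu_fence_free(struct rcu_head *rcu)
	{
		struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

		kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
	}

	/* job-embedded fence: dropping the last reference frees the job */
	static void amdgpu_job_fence_free(struct rcu_head *rcu)
	{
		struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

		kfree(container_of(f, struct amdgpu_job, hw_fence.base));
	}

which is presumably why amdgpu_fence_emit() takes the extra
dma_fence_get() on the job path.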
Alex
>
> Apart from that looks good to me.
>
> Regards,
> Christian.
>
> >  	} else {
> > -		/* take use of job-embedded fence */
> > -		am_fence = &job->hw_fence;
> > +		am_fence = af;
> >  	}
> >  	fence = &am_fence->base;
> >  	am_fence->ring = ring;
> > 
> >  	seq = ++ring->fence_drv.sync_seq;
> > -	if (job && job->job_run_counter) {
> > -		/* reinit seq for resubmitted jobs */
> > -		fence->seqno = seq;
> > -		/* TO be inline with external fence creation and other drivers */
> > +	if (af) {
> > +		dma_fence_init(fence, &amdgpu_job_fence_ops,
> > +			       &ring->fence_drv.lock,
> > +			       adev->fence_context + ring->idx, seq);
> > +		/* Against remove in amdgpu_job_{free, free_cb} */
> >  		dma_fence_get(fence);
> >  	} else {
> > -		if (job) {
> > -			dma_fence_init(fence, &amdgpu_job_fence_ops,
> > -				       &ring->fence_drv.lock,
> > -				       adev->fence_context + ring->idx, seq);
> > -			/* Against remove in amdgpu_job_{free, free_cb} */
> > -			dma_fence_get(fence);
> > -		} else {
> > -			dma_fence_init(fence, &amdgpu_fence_ops,
> > -				       &ring->fence_drv.lock,
> > -				       adev->fence_context + ring->idx, seq);
> > -		}
> > +		dma_fence_init(fence, &amdgpu_fence_ops,
> > +			       &ring->fence_drv.lock,
> > +			       adev->fence_context + ring->idx, seq);
> >  	}
> > 
> >  	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> > index 802743efa3b39..206b70acb29a0 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> > @@ -128,6 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
> >  	struct amdgpu_device *adev = ring->adev;
> >  	struct amdgpu_ib *ib = &ibs[0];
> >  	struct dma_fence *tmp = NULL;
> > +	struct amdgpu_fence *af;
> >  	bool need_ctx_switch;
> >  	struct amdgpu_vm *vm;
> >  	uint64_t fence_ctx;
> > @@ -154,6 +155,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
> >  		csa_va = job->csa_va;
> >  		gds_va = job->gds_va;
> >  		init_shadow = job->init_shadow;
> > +		af = &job->hw_fence;
> >  	} else {
> >  		vm = NULL;
> >  		fence_ctx = 0;
> > @@ -161,6 +163,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
> >  		csa_va = 0;
> >  		gds_va = 0;
> >  		init_shadow = false;
> > +		af = NULL;
> >  	}
> > 
> >  	if (!ring->sched.ready) {
> > @@ -282,7 +285,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
> >  		amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
> >  	}
> > 
> > -	r = amdgpu_fence_emit(ring, f, job, fence_flags);
> > +	r = amdgpu_fence_emit(ring, f, af, fence_flags);
> >  	if (r) {
> >  		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
> >  		if (job && job->vmid)
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
> > index e1f25218943a4..9ae522baad8e7 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
> > @@ -157,8 +157,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
> >  void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
> >  int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
> >  void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
> > -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
> > -		      unsigned flags);
> > +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
> > +		      struct amdgpu_fence *af, unsigned int flags);
> >  int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
> >  			      uint32_t timeout);
> >  bool amdgpu_fence_process(struct amdgpu_ring *ring);
>