[PATCH v2] drm/amdgpu: set start timestamp of fence in the right place
Zhu, Jiadong
Jiadong.Zhu at amd.com
Wed Jul 10 07:54:28 UTC 2024
> -----Original Message-----
> From: Christian König <ckoenig.leichtzumerken at gmail.com>
> Sent: Wednesday, July 10, 2024 3:17 PM
> To: Zhu, Jiadong <Jiadong.Zhu at amd.com>; amd-gfx at lists.freedesktop.org
> Subject: Re: [PATCH v2] drm/amdgpu: set start timestamp of fence in the
> right place
>
> Am 10.07.24 um 02:31 schrieb jiadong.zhu at amd.com:
> > From: Jiadong Zhu <Jiadong.Zhu at amd.com>
> >
> > The job's embedded fence is a dma_fence which shall not be converted to
> > amdgpu_fence.
>
> Good catch.
>
> > The start timestamp shall be saved on job for hw_fence.
>
> But NAK to that approach. Why do we need the start time here in the first
> place?
>
> Regards,
> Christian.
>
The start timestamp is used by the ring mux to check whether fences have stayed unsignaled for a period of time under MCBP scenarios (by calling amdgpu_fence_last_unsignaled_time_us).
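
To make that concrete, below is a rough sketch of the kind of check the mux performs under MCBP. It is illustrative only, not the actual amdgpu_ring_mux code; the helper name and threshold are made up.

/* Illustrative sketch only; not the actual ring mux logic. */
#define DEMO_PREEMPT_THRESHOLD_US	1000	/* made-up threshold */

static bool demo_ring_needs_preemption(struct amdgpu_ring *ring)
{
	/* Age in microseconds of the oldest still-unsignaled fence. */
	u64 unsignaled_us = amdgpu_fence_last_unsignaled_time_us(ring);

	/* 0 means every emitted fence has already signaled. */
	if (!unsignaled_us)
		return false;

	return unsignaled_us >= DEMO_PREEMPT_THRESHOLD_US;
}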
Thanks,
Jiadong
> >
> > v2: optimize get_fence_start_time.
>
> >
> > Signed-off-by: Jiadong Zhu <Jiadong.Zhu at amd.com>
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 31 ++++++++++++++++++++---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_job.h   |  3 +++
> > 2 files changed, 31 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> > index 2f24a6aa13bf..72bb007e48c8 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> > @@ -88,6 +88,31 @@ static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
> >  	return NULL;
> >  }
> >
> > +static inline void set_fence_start_time(struct dma_fence *f, ktime_t start_timestamp) {
> > +	if (f->ops == &amdgpu_fence_ops) {
> > +		struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
> > +
> > +		__f->start_timestamp = start_timestamp;
> > +	} else if (f->ops == &amdgpu_job_fence_ops) {
> > +		struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
> > +
> > +		job->start_timestamp = start_timestamp;
> > +	}
> > +}
> > +
> > +static inline ktime_t get_fence_start_time(struct dma_fence *f) {
> > +	if (unlikely(f->ops == &amdgpu_fence_ops)) {
> > +		struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
> > +
> > +		return __f->start_timestamp;
> > +	}
> > +	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
> > +
> > +	return job->start_timestamp;
> > +}
> > +
> > /**
> > * amdgpu_fence_write - write a fence value
> > *
> > @@ -197,7 +222,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
> > }
> > }
> >
> > - to_amdgpu_fence(fence)->start_timestamp = ktime_get();
> > + set_fence_start_time(fence, ktime_get());
> >
> > /* This function can't be called concurrently anyway, otherwise
> > * emitting the fence would mess up the hardware ring buffer.
> > @@ -428,7 +453,7 @@ u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
> > return 0;
> >
> > return ktime_us_delta(ktime_get(),
> > - to_amdgpu_fence(fence)->start_timestamp);
> > + get_fence_start_time(fence));
> > }
> >
> > /**
> > @@ -451,7 +476,7 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
> > if (!fence)
> > return;
> >
> > - to_amdgpu_fence(fence)->start_timestamp = timestamp;
> > + set_fence_start_time(fence, timestamp);
> > }
> >
> > /**
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
> > index a963a25ddd62..3a73fe11a1ce 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
> > @@ -73,6 +73,9 @@ struct amdgpu_job {
> > uint64_t gds_va;
> > bool init_shadow;
> >
> > +	/* start timestamp for hw_fence */
> > +	ktime_t start_timestamp;
> > +
> > /* job_run_counter >= 1 means a resubmit job */
> > uint32_t job_run_counter;
> >
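
As a side note on the bookkeeping itself: the helpers above follow the usual ktime pattern of recording ktime_get() at submission and reporting the elapsed time with ktime_us_delta(). A minimal, self-contained sketch of that pattern (the struct and function names here are hypothetical, not part of the patch):

#include <linux/ktime.h>

/* Hypothetical example, not part of the patch. */
struct demo_work {
	ktime_t start_timestamp;
};

/* Record when the work was handed to the hardware. */
static void demo_work_submit(struct demo_work *w)
{
	w->start_timestamp = ktime_get();
}

/* Microseconds the work has been outstanding so far. */
static s64 demo_work_age_us(const struct demo_work *w)
{
	return ktime_us_delta(ktime_get(), w->start_timestamp);
}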