[PATCH] drm/vkms: Fix race condition around accessing frame number

Daniel Vetter daniel at ffwll.ch
Fri Aug 31 08:41:40 UTC 2018


On Fri, Aug 24, 2018 at 02:16:34AM +0300, Haneen Mohammed wrote:
> crtc_state is accessed by both vblank_handle() and the ordered
> work_struct handle vkms_crc_work_handle() to retrieve and or update
> the frame number for computed CRC.
> 
> Since work_struct can fail, add frame_end to account for missing frame
> numbers.
> 
> Use atomic_t with appropriate flags for synchronization between the hrtimer
> callback and the ordered work_struct handle, since a spinlock can't be used
> with the work_struct handle and a mutex can't be used with the hrtimer callback.
> 
> This patch passes the following subtests from igt kms_pipe_crc_basic test:
> bad-source, read-crc-pipe-A, read-crc-pipe-A-frame-sequence,
> nonblocking-crc-pipe-A, nonblocking-crc-pipe-A-frame-sequence
> 
> Signed-off-by: Haneen Mohammed <hamohammed.sa at gmail.com>

So atomic_t is probably the greatest trap in the linux kernel. It sounds
like the right thing, but in 99% of the cases where you want to use it, it
isn't. The trouble is that atomic_t is _very_ unordered; the only thing it
guarantees is that atomic_t transactions on the _same_ variable are
consistent. Anything else can be reordered at will.

This is very confusing since the new C++ atomics standard has fully
ordered atomics as the default, and you expressly need to ask for the
weakly ordered ones. In linux you need to sprinkle epic amounts of
smp_mb() and similar barriers around them to make atomic_t behave like a
"normal" C++ atomic type.

tldr; atomic_t is good for special refcounting needs, when the normal
refcount_t doesn't cut it. Not much else.

What usually should be done:
- Use normal u64 (to match the vblank counter size) instead of atomic_t
  here.
- Make sure all access is protected by an appropriate spinlock.

> ---
>  drivers/gpu/drm/vkms/vkms_crc.c  | 33 ++++++++++++++++++++++++++++++--
>  drivers/gpu/drm/vkms/vkms_crtc.c | 13 +++++++++++--
>  drivers/gpu/drm/vkms/vkms_drv.h  |  6 ++++--
>  3 files changed, 46 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
> index ed47d67cecd6..4a1ba5b7886a 100644
> --- a/drivers/gpu/drm/vkms/vkms_crc.c
> +++ b/drivers/gpu/drm/vkms/vkms_crc.c
> @@ -34,6 +34,15 @@ static uint32_t _vkms_get_crc(struct vkms_crc_data *crc_data)
>  	return crc;
>  }
>  
> +/**
> + * vkms_crc_work_handle - ordered work_struct to compute CRC
> + *
> + * @work: work_struct
> + *
> + * Work handler for computing CRCs. work_struct scheduled in
> + * an ordered workqueue that's periodically scheduled to run by
> + * _vblank_handle() and flushed at vkms_atomic_crtc_destroy_state().
> + */
>  void vkms_crc_work_handle(struct work_struct *work)
>  {
>  	struct vkms_crtc_state *crtc_state = container_of(work,
> @@ -45,8 +54,18 @@ void vkms_crc_work_handle(struct work_struct *work)
>  						output);
>  	struct vkms_crc_data *primary_crc = NULL;
>  	struct drm_plane *plane;
> -
>  	u32 crc32 = 0;
> +	u32 frame_start, frame_end;
> +
> +	frame_start = atomic_read(&crtc_state->frame_start);
> +	frame_end = atomic_read(&crtc_state->frame_end);
> +	/* _vblank_handle() hasn't updated frame_start yet */
> +	if (!frame_start) {

I think if we go with u64 we can ignore the wrap-around issues, since
that will simply never happen. But a comment would be good.

Aside from the atomic_t issue I think this looks good.
-Daniel

> +		return;
> +	} else if (frame_start == frame_end) {
> +		atomic_set(&crtc_state->frame_start, 0);
> +		return;
> +	}
>  
>  	drm_for_each_plane(plane, &vdev->drm) {
>  		struct vkms_plane_state *vplane_state;
> @@ -67,7 +86,17 @@ void vkms_crc_work_handle(struct work_struct *work)
>  	if (primary_crc)
>  		crc32 = _vkms_get_crc(primary_crc);
>  
> -	drm_crtc_add_crc_entry(crtc, true, crtc_state->n_frame, &crc32);
> +	frame_end = drm_crtc_accurate_vblank_count(crtc);
> +
> +	/* queue_work can fail to schedule crc_work; add crc for
> +	 * missing frames
> +	 */
> +	while (frame_start <= frame_end)
> +		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
> +
> +	/* to avoid using the same value again */
> +	atomic_set(&crtc_state->frame_end, frame_end);
> +	atomic_set(&crtc_state->frame_start, 0);
>  }
>  
>  static int vkms_crc_parse_source(const char *src_name, bool *enabled)
> diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
> index 9d0b1a325a78..a170677acd46 100644
> --- a/drivers/gpu/drm/vkms/vkms_crtc.c
> +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
> @@ -22,8 +22,17 @@ static void _vblank_handle(struct vkms_output *output)
>  		DRM_ERROR("vkms failure on handling vblank");
>  
>  	if (state && output->crc_enabled) {
> -		state->n_frame = drm_crtc_accurate_vblank_count(crtc);
> -		queue_work(output->crc_workq, &state->crc_work);
> +		u32 frame = drm_crtc_accurate_vblank_count(crtc);
> +
> +		/* update frame_start only if a queued vkms_crc_work_handle has
> +		 * read the data
> +		 */
> +		if (!atomic_read(&state->frame_start))
> +			atomic_set(&state->frame_start, frame);
> +
> +		ret = queue_work(output->crc_workq, &state->crc_work);
> +		if (!ret)
> +			DRM_WARN("failed to queue vkms_crc_work_handle");
>  	}
>  
>  	spin_unlock(&output->lock);
> diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
> index 2017a2ccc43d..4a3956a0549e 100644
> --- a/drivers/gpu/drm/vkms/vkms_drv.h
> +++ b/drivers/gpu/drm/vkms/vkms_drv.h
> @@ -39,12 +39,14 @@ struct vkms_plane_state {
>   * vkms_crtc_state - Driver specific CRTC state
>   * @base: base CRTC state
>   * @crc_work: work struct to compute and add CRC entries
> - * @n_frame: frame number for computed CRC
> + * @n_frame_start: start frame number for computed CRC
> + * @n_frame_end: end frame number for computed CRC
>   */
>  struct vkms_crtc_state {
>  	struct drm_crtc_state base;
>  	struct work_struct crc_work;
> -	unsigned int n_frame;
> +	atomic_t frame_start;
> +	atomic_t frame_end;
>  };
>  
>  struct vkms_output {
> -- 
> 2.17.1
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


More information about the dri-devel mailing list