[Intel-gfx] [PATCH] drm/i915/display: Use dma_fence interfaces instead of i915_sw_fence

Ville Syrjälä ville.syrjala at linux.intel.com
Wed Oct 18 15:35:41 UTC 2023


On Wed, Oct 18, 2023 at 05:23:00PM +0200, Maarten Lankhorst wrote:
> 
> 
> On 2023-10-18 17:19, Ville Syrjälä wrote:
> > On Mon, Oct 16, 2023 at 11:08:03AM +0300, Jouni Högander wrote:
> >> We are preparing for the Xe driver. The Xe driver doesn't have an
> >> i915_sw_fence implementation. Let's drop i915_sw_fence usage from the
> >> display code and use dma_fence interfaces directly.
> >>
> >> For this purpose, stack the dma fences from the related objects into the
> >> old and new plane states using drm_gem_plane_helper_prepare_fb. Then wait
> >> for these stacked fences during atomic commit.
> >>
> >> There is no need for separate GPU reset handling in
> >> intel_atomic_commit_fence_wait as the fences are signaled when a GPU hang
> >> is detected and the GPU is being reset.
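
As a side note: a minimal sketch of what that commit-time wait could look
like, assuming the stacked fences end up in each plane state's ->fence
pointer (which is what drm_gem_plane_helper_prepare_fb() does). The helper
name below is purely illustrative, not something the patch adds:

#include <drm/drm_atomic.h>
#include <linux/dma-fence.h>

static int wait_for_stacked_plane_fences(struct drm_atomic_state *state)
{
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct drm_plane *plane;
        int i;

        for_each_oldnew_plane_in_state(state, plane,
                                       old_plane_state, new_plane_state, i) {
                int ret;

                /* Interruptible waits; these also complete once the
                 * fences get signaled on GPU reset. */
                if (old_plane_state->fence) {
                        ret = dma_fence_wait(old_plane_state->fence, true);
                        if (ret)
                                return ret;
                }

                if (new_plane_state->fence) {
                        ret = dma_fence_wait(new_plane_state->fence, true);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
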
> >>
> >> Cc: Ville Syrjälä <ville.syrjala at linux.intel.com>
> >> Cc: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
> >> Cc: José Roberto de Souza <jose.souza at intel.com>
> >>
> >> Signed-off-by: Jouni Högander <jouni.hogander at intel.com>
> >> ---
> >>   drivers/gpu/drm/i915/display/intel_atomic.c   |  3 -
> >>   .../gpu/drm/i915/display/intel_atomic_plane.c | 49 +++---------
> >>   drivers/gpu/drm/i915/display/intel_display.c  | 78 ++++++-------------
> >>   .../drm/i915/display/intel_display_types.h    |  2 -
> >>   4 files changed, 37 insertions(+), 95 deletions(-)
> >>
> >> diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
> >> index 5d18145da279..ec0d5168b503 100644
> >> --- a/drivers/gpu/drm/i915/display/intel_atomic.c
> >> +++ b/drivers/gpu/drm/i915/display/intel_atomic.c
> >> @@ -331,9 +331,6 @@ void intel_atomic_state_free(struct drm_atomic_state *_state)
> >>   
> >>   	drm_atomic_state_default_release(&state->base);
> >>   	kfree(state->global_objs);
> >> -
> >> -	i915_sw_fence_fini(&state->commit_ready);
> >> -
> >>   	kfree(state);
> >>   }
> >>   
> >> diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
> >> index b1074350616c..d4f9168ec42c 100644
> >> --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
> >> +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
> >> @@ -32,6 +32,7 @@
> >>    */
> >>   
> >>   #include <drm/drm_atomic_helper.h>
> >> +#include <drm/drm_gem_atomic_helper.h>
> >>   #include <drm/drm_blend.h>
> >>   #include <drm/drm_fourcc.h>
> >>   
> >> @@ -1035,7 +1036,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
> >>   	struct intel_atomic_state *state =
> >>   		to_intel_atomic_state(new_plane_state->uapi.state);
> >>   	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
> >> -	const struct intel_plane_state *old_plane_state =
> >> +	struct intel_plane_state *old_plane_state =
> >>   		intel_atomic_get_old_plane_state(state, plane);
> >>   	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
> >>   	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
> >> @@ -1057,56 +1058,30 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
> >>   		 * This should only fail upon a hung GPU, in which case we
> >>   		 * can safely continue.
> >>   		 */
> >> -		if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
> >> -			ret = i915_sw_fence_await_reservation(&state->commit_ready,
> >> -							      old_obj->base.resv,
> >> -							      false, 0,
> >> -							      GFP_KERNEL);
> >> +		if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state) &&
> >> +		    !dma_resv_test_signaled(old_obj->base.resv,
> >> +					    dma_resv_usage_rw(false))) {
> >> +			ret = drm_gem_plane_helper_prepare_fb(_plane, &old_plane_state->uapi);
> >>   			if (ret < 0)
> >>   				return ret;
> >>   		}
> >>   	}
> >>   
> >> -	if (new_plane_state->uapi.fence) { /* explicit fencing */
> >> -		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
> >> -					     &attr);
> >> -		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
> >> -						    new_plane_state->uapi.fence,
> >> -						    i915_fence_timeout(dev_priv),
> >> -						    GFP_KERNEL);
> >> -		if (ret < 0)
> >> -			return ret;
> >> -	}
> >> -
> >>   	if (!obj)
> >>   		return 0;
> >>   
> >> -
> >>   	ret = intel_plane_pin_fb(new_plane_state);
> >>   	if (ret)
> >>   		return ret;
> >>   
> >> -	i915_gem_object_wait_priority(obj, 0, &attr);
> >> -
> >> -	if (!new_plane_state->uapi.fence) { /* implicit fencing */
> >> -		struct dma_resv_iter cursor;
> >> -		struct dma_fence *fence;
> >> +	ret = drm_gem_plane_helper_prepare_fb(_plane, &new_plane_state->uapi);
> > 
> > I don't think we can use that as is due to bigjoiner stuff.
> > I think we'd need a slightly lower level variant that takes
> > the fb+fence in explicitly instead of the full plane state.
> > 
> > And I suppose we already have a slight bug here where only the
> > master pipe's plane will consult the explicit fence and the rest
> > will take the implicit sync path.
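
(Roughly the kind of lower level variant meant above, taking the fb + a
fence pointer explicitly; the name and exact signature are hypothetical,
and the body just mirrors what drm_gem_plane_helper_prepare_fb() does
internally:)

#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-resv.h>

/* Hypothetical helper: collect the implicit fences of the GEM objects
 * backing @fb and merge them into *@fence (which may already hold an
 * explicit fence), without needing a full plane state. */
static int intel_plane_add_fb_fences(struct drm_framebuffer *fb,
                                     struct dma_fence **fence)
{
        unsigned int i;

        for (i = 0; i < fb->format->num_planes; i++) {
                struct drm_gem_object *obj = drm_gem_fb_get_obj(fb, i);
                struct dma_fence *new;
                int ret;

                if (!obj)
                        continue;

                /* One fence summarizing the relevant reservation fences. */
                ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE,
                                             &new);
                if (ret)
                        return ret;

                if (!new)
                        continue;

                if (*fence) {
                        struct dma_fence_chain *chain = dma_fence_chain_alloc();

                        if (!chain) {
                                dma_fence_put(new);
                                return -ENOMEM;
                        }

                        dma_fence_chain_init(chain, *fence, new, 1);
                        *fence = &chain->base;
                } else {
                        *fence = new;
                }
        }

        return 0;
}
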
> Why would bigjoiner fail? If bigjoiner happens, the uapi fb will be 
> fenced at least once.

Hmm. Yeah, I suppose that should cover it since we don't consider
plane visibility anywhere.

And I guess I was wrong about the existing bug as well, since
there should be no uapi.fb set on the slave planes, and so no
extra implicit sync fence will be added.
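
FWIW the generic helper already bails out when the plane state has no fb,
so a slave plane with no uapi.fb can't pick up any implicit fence either
way; roughly (paraphrased, not a verbatim copy of the helper):

int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane,
                                    struct drm_plane_state *state)
{
        /* No fb on this plane state (e.g. a bigjoiner slave plane with
         * no uapi.fb) -> nothing to fence, nothing gets added. */
        if (!state->fb)
                return 0;

        /* ... otherwise collect the implicit fences of the fb's GEM
         * objects and merge them, together with any explicit fence,
         * into state->fence ... */
        return 0;
}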

-- 
Ville Syrjälä
Intel

